Diffstat (limited to 'drivers')
74 files changed, 3765 insertions, 652 deletions
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 6b94fb7be5f2..00c46e0b40e4 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -12,9 +12,10 @@
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 #include <linux/amba/bus.h>
 
-#include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/sizes.h>
 
 #define to_amba_device(d)	container_of(d, struct amba_device, dev)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 656448c7fef9..7f701cbe14ab 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -105,7 +105,7 @@ enum {
 	board_ahci_ign_iferr	= 2,
 	board_ahci_sb600	= 3,
 	board_ahci_mv		= 4,
-	board_ahci_sb700	= 5,
+	board_ahci_sb700	= 5, /* for SB700 and SB800 */
 	board_ahci_mcp65	= 6,
 	board_ahci_nopmp	= 7,
 
@@ -439,7 +439,7 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_sb700 */
+	/* board_ahci_sb700, for SB700 and SB800 */
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
 		.flags		= AHCI_FLAG_COMMON,
@@ -2446,6 +2446,8 @@ static void ahci_print_info(struct ata_host *host)
 		speed_s = "1.5";
 	else if (speed == 2)
 		speed_s = "3";
+	else if (speed == 3)
+		speed_s = "6";
 	else
 		speed_s = "?";
 
@@ -2610,6 +2612,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
 
+	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
+	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
+		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+
 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 5fdf1678d0cc..887d8f46a287 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -154,11 +154,13 @@ struct piix_map_db {
 
 struct piix_host_priv {
 	const int *map;
+	u32 saved_iocfg;
 	void __iomem *sidpr;
 };
 
 static int piix_init_one(struct pci_dev *pdev,
 			 const struct pci_device_id *ent);
+static void piix_remove_one(struct pci_dev *pdev);
 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -296,7 +298,7 @@ static struct pci_driver piix_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= piix_pci_tbl,
 	.probe			= piix_init_one,
-	.remove			= ata_pci_remove_one,
+	.remove			= piix_remove_one,
 #ifdef CONFIG_PM
 	.suspend		= piix_pci_device_suspend,
 	.resume			= piix_pci_device_resume,
@@ -308,7 +310,7 @@ static struct scsi_host_template piix_sht = {
 };
 
 static struct ata_port_operations piix_pata_ops = {
-	.inherits		= &ata_bmdma_port_ops,
+	.inherits		= &ata_bmdma32_port_ops,
 	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
@@ -610,8 +612,9 @@ static const struct ich_laptop ich_laptop[] = {
 static int ich_pata_cable_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct piix_host_priv *hpriv = ap->host->private_data;
 	const struct ich_laptop *lap = &ich_laptop[0];
-	u8 tmp, mask;
+	u8 mask;
 
 	/* Check for specials - Acer Aspire 5602WLMi */
 	while (lap->device) {
@@ -625,8 +628,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
 
 	/* check BIOS cable detect results */
 	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
-	pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
-	if ((tmp & mask) == 0)
+	if ((hpriv->saved_iocfg & mask) == 0)
 		return ATA_CBL_PATA40;
 	return ATA_CBL_PATA80;
 }
@@ -1350,7 +1352,7 @@ static int __devinit piix_init_sidpr(struct ata_host *host)
 	return 0;
 }
 
-static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+static void piix_iocfg_bit18_quirk(struct ata_host *host)
 {
 	static const struct dmi_system_id sysids[] = {
 		{
@@ -1367,7 +1369,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
 
 		{ }	/* terminate list */
 	};
-	u32 iocfg;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct piix_host_priv *hpriv = host->private_data;
 
 	if (!dmi_check_system(sysids))
 		return;
@@ -1376,12 +1379,11 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
 	 * seem to use it to disable a channel.  Clear the bit on the
 	 * affected systems.
 	 */
-	pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
-	if (iocfg & (1 << 18)) {
+	if (hpriv->saved_iocfg & (1 << 18)) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "applying IOCFG bit18 quirk\n");
-		iocfg &= ~(1 << 18);
-		pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+		pci_write_config_dword(pdev, PIIX_IOCFG,
+				       hpriv->saved_iocfg & ~(1 << 18));
 	}
 }
 
@@ -1430,6 +1432,17 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	/* Save IOCFG, this will be used for cable detection, quirk
+	 * detection and restoration on detach.  This is necessary
+	 * because some ACPI implementations mess up cable related
+	 * bits on _STM.  Reported on kernel bz#11879.
+	 */
+	pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
+
 	/* ICH6R may be driven by either ata_piix or ahci driver
 	 * regardless of BIOS configuration.  Make sure AHCI mode is
 	 * off.
@@ -1441,10 +1454,6 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	}
 
 	/* SATA map init can change port_info, do it before prepping host */
-	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv)
-		return -ENOMEM;
-
 	if (port_flags & ATA_FLAG_SATA)
 		hpriv->map = piix_init_sata_map(pdev, port_info,
 					piix_map_db_table[ent->driver_data]);
@@ -1463,7 +1472,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	}
 
 	/* apply IOCFG bit18 quirk */
-	piix_iocfg_bit18_quirk(pdev);
+	piix_iocfg_bit18_quirk(host);
 
 	/* On ICH5, some BIOSen disable the interrupt using the
 	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -1488,6 +1497,16 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
 }
 
+static void piix_remove_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct piix_host_priv *hpriv = host->private_data;
+
+	pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
+
+	ata_pci_remove_one(pdev);
+}
+
 static int __init piix_init(void)
 {
 	int rc;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f178a450ec08..175df54eb664 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1007,6 +1007,7 @@ static const char *sata_spd_string(unsigned int spd)
 	static const char * const spd_str[] = {
 		"1.5 Gbps",
 		"3.0 Gbps",
+		"6.0 Gbps",
 	};
 
 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
@@ -2000,6 +2001,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 	   as the caller should know this */
 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
 		return 0;
+	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
+	if (ata_id_is_cfa(adev->id)
+	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
+		return 0;
 	/* PIO3 and higher it is mandatory */
 	if (adev->pio_mode > XFER_PIO_2)
 		return 1;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9033d164c4ec..c59ad76c84b1 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -66,6 +66,7 @@ const struct ata_port_operations ata_sff_port_ops = {
 
 	.port_start		= ata_sff_port_start,
 };
+EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
 const struct ata_port_operations ata_bmdma_port_ops = {
 	.inherits		= &ata_sff_port_ops,
@@ -77,6 +78,14 @@ const struct ata_port_operations ata_bmdma_port_ops = {
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 };
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.sff_data_xfer		= ata_sff_data_xfer32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
 /**
  *	ata_fill_sg - Fill PCI IDE PRD table
@@ -166,8 +175,9 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 			blen = len & 0xffff;
 			ap->prd[pi].addr = cpu_to_le32(addr);
 			if (blen == 0) {
-			   /* Some PATA chipsets like the CS5530 can't
-			      cope with 0x0000 meaning 64K as the spec says */
+				/* Some PATA chipsets like the CS5530 can't
+				   cope with 0x0000 meaning 64K as the spec
+				   says */
 				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
 				blen = 0x8000;
 				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
@@ -200,6 +210,7 @@ void ata_sff_qc_prep(struct ata_queued_cmd *qc)
 
 	ata_fill_sg(qc);
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 
 /**
  *	ata_sff_dumb_qc_prep - Prepare taskfile for submission
@@ -217,6 +228,7 @@ void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
 
 	ata_fill_sg_dumb(qc);
 }
+EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 
 /**
  *	ata_sff_check_status - Read device status reg & clear interrupt
@@ -233,6 +245,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
 {
 	return ioread8(ap->ioaddr.status_addr);
 }
+EXPORT_SYMBOL_GPL(ata_sff_check_status);
 
 /**
  *	ata_sff_altstatus - Read device alternate status reg
@@ -275,7 +288,7 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
 		status = ata_sff_altstatus(ap);
 		/* Not us: We are busy */
 		if (status & ATA_BUSY)
-		    return status;
+			return status;
 	}
 	/* Clear INTRQ latch */
 	status = ap->ops->sff_check_status(ap);
@@ -319,6 +332,7 @@ void ata_sff_pause(struct ata_port *ap)
 	ata_sff_sync(ap);
 	ndelay(400);
 }
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 
 /**
  *	ata_sff_dma_pause - Pause before commencing DMA
@@ -327,7 +341,7 @@ void ata_sff_pause(struct ata_port *ap)
  *	Perform I/O fencing and ensure sufficient cycle delays occur
  *	for the HDMA1:0 transition
  */
-
+
 void ata_sff_dma_pause(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
@@ -341,6 +355,7 @@ void ata_sff_dma_pause(struct ata_port *ap)
 	   corruption. */
 	BUG();
 }
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 
 /**
  *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
@@ -396,6 +411,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 
 static int ata_sff_check_ready(struct ata_link *link)
 {
@@ -422,6 +438,7 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 {
 	return ata_wait_ready(link, deadline, ata_sff_check_ready);
 }
+EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 
 /**
  *	ata_sff_dev_select - Select device 0/1 on ATA bus
@@ -449,6 +466,7 @@ void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
 	iowrite8(tmp, ap->ioaddr.device_addr);
 	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
 }
+EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 
 /**
  *	ata_dev_select - Select device 0/1 on ATA bus
@@ -513,6 +531,7 @@ u8 ata_sff_irq_on(struct ata_port *ap)
 
 	return tmp;
 }
+EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 
 /**
  *	ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
@@ -534,6 +553,7 @@ void ata_sff_irq_clear(struct ata_port *ap)
 
 	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
 }
+EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
 
 /**
  *	ata_sff_tf_load - send taskfile registers to host controller
@@ -593,6 +613,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 
 	ata_wait_idle(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
 /**
  *	ata_sff_tf_read - input device's ATA taskfile shadow registers
@@ -633,6 +654,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		WARN_ON(1);
 	}
 }
+EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 
 /**
  *	ata_sff_exec_command - issue ATA command to host controller
@@ -652,6 +674,7 @@ void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 	iowrite8(tf->command, ap->ioaddr.command_addr);
 	ata_sff_pause(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 
 /**
  *	ata_tf_to_host - issue ATA taskfile to host controller
@@ -717,6 +740,53 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
 
 	return words << 1;
 }
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
+
+/**
+ *	ata_sff_data_xfer32 - Transfer data by PIO
+ *	@dev: device to target
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO using 32bit
+ *	I/O operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+
+unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
+				 unsigned int buflen, int rw)
+{
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 2;
+	int slop = buflen & 3;
+
+	/* Transfer multiple of 4 bytes */
+	if (rw == READ)
+		ioread32_rep(data_addr, buf, words);
+	else
+		iowrite32_rep(data_addr, buf, words);
+
+	if (unlikely(slop)) {
+		__le32 pad;
+		if (rw == READ) {
+			pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+			memcpy(buf + buflen - slop, &pad, slop);
+		} else {
+			memcpy(&pad, buf + buflen - slop, slop);
+			iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+		}
+		words++;
+	}
+	return words << 2;
+}
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 
 /**
  *	ata_sff_data_xfer_noirq - Transfer data by PIO
@@ -746,6 +816,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
 
 	return consumed;
 }
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
 
 /**
  *	ata_pio_sector - Transfer a sector of data.
@@ -922,13 +993,15 @@ next_sg:
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
+		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+						  count, rw);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
+		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+						  count, rw);
 	}
 
 	bytes -= min(bytes, consumed);
@@ -1013,18 +1086,19 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
-static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+				   struct ata_queued_cmd *qc)
 {
 	if (qc->tf.flags & ATA_TFLAG_POLLING)
 		return 1;
 
 	if (ap->hsm_task_state == HSM_ST_FIRST) {
 		if (qc->tf.protocol == ATA_PROT_PIO &&
 		    (qc->tf.flags & ATA_TFLAG_WRITE))
 			return 1;
 
 		if (ata_is_atapi(qc->tf.protocol) &&
 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			return 1;
 	}
 
@@ -1338,6 +1412,7 @@ fsm_start:
 
 	return poll_next;
 }
+EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
 void ata_pio_task(struct work_struct *work)
 {
@@ -1507,6 +1582,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
 
 /**
  *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
@@ -1526,6 +1602,7 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
 	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
 	return true;
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
 
 /**
  *	ata_sff_host_intr - Handle host interrupt for given (port, task)
@@ -1623,6 +1700,7 @@ idle_irq:
 #endif
 	return 0;	/* irq not handled */
 }
+EXPORT_SYMBOL_GPL(ata_sff_host_intr);
 
 /**
  *	ata_sff_interrupt - Default ATA host interrupt handler
@@ -1667,6 +1745,7 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
 
 	return IRQ_RETVAL(handled);
 }
+EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 /**
  *	ata_sff_freeze - Freeze SFF controller port
@@ -1695,6 +1774,7 @@ void ata_sff_freeze(struct ata_port *ap)
 
 	ap->ops->sff_irq_clear(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_freeze);
 
 /**
  *	ata_sff_thaw - Thaw SFF controller port
@@ -1712,6 +1792,7 @@ void ata_sff_thaw(struct ata_port *ap)
 	ap->ops->sff_irq_clear(ap);
 	ap->ops->sff_irq_on(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_thaw);
 
 /**
  *	ata_sff_prereset - prepare SFF link for reset
@@ -1753,6 +1834,7 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_prereset);
 
 /**
  *	ata_devchk - PATA device presence detection
@@ -1865,6 +1947,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
 
 	return class;
 }
+EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
 
 /**
  *	ata_sff_wait_after_reset - wait for devices to become ready after reset
@@ -1941,6 +2024,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
 
 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
 			     unsigned long deadline)
@@ -2013,6 +2097,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_softreset);
 
 /**
  *	sata_sff_hardreset - reset host port via SATA phy reset
@@ -2045,6 +2130,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
 	DPRINTK("EXIT, class=%u\n", *class);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(sata_sff_hardreset);
 
 /**
  *	ata_sff_postreset - SFF postreset callback
@@ -2080,6 +2166,7 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
 	if (ap->ioaddr.ctl_addr)
 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
 }
+EXPORT_SYMBOL_GPL(ata_sff_postreset);
 
 /**
  *	ata_sff_error_handler - Stock error handler for BMDMA controller
@@ -2152,6 +2239,7 @@ void ata_sff_error_handler(struct ata_port *ap)
 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
 		  ap->ops->postreset);
 }
+EXPORT_SYMBOL_GPL(ata_sff_error_handler);
 
 /**
  *	ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
@@ -2174,6 +2262,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
 
 	spin_unlock_irqrestore(ap->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
 
 /**
  *	ata_sff_port_start - Set port up for dma.
@@ -2194,6 +2283,7 @@ int ata_sff_port_start(struct ata_port *ap)
 		return ata_port_start(ap);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_port_start);
 
 /**
  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
@@ -2219,6 +2309,7 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
 }
+EXPORT_SYMBOL_GPL(ata_sff_std_ports);
 
 unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
 				    unsigned long xfer_mask)
@@ -2230,6 +2321,7 @@ unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	return xfer_mask;
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
 
 /**
  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
@@ -2258,6 +2350,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
 	/* issue r/w command */
 	ap->ops->sff_exec_command(ap, &qc->tf);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
 
 /**
  *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
@@ -2290,6 +2383,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
 	 * unneccessarily delayed for MMIO
 	 */
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
 
 /**
  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
@@ -2314,6 +2408,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
 	ata_sff_dma_pause(ap);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
 
 /**
  *	ata_bmdma_status - Read PCI IDE BMDMA status
@@ -2330,6 +2425,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
 {
 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
 
 /**
  *	ata_bus_reset - reset host port and associated ATA channel
@@ -2422,6 +2518,7 @@ err_out:
 
 	DPRINTK("EXIT\n");
 }
+EXPORT_SYMBOL_GPL(ata_bus_reset);
 
 #ifdef CONFIG_PCI
 
@@ -2449,6 +2546,7 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
 		return -EOPNOTSUPP;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
 
 /**
  *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
@@ -2501,11 +2599,12 @@ int ata_pci_bmdma_init(struct ata_host *host)
 			host->flags |= ATA_HOST_SIMPLEX;
 
 		ata_port_desc(ap, "bmdma 0x%llx",
 			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
 
 static int ata_resources_present(struct pci_dev *pdev, int port)
 {
@@ -2513,7 +2612,7 @@ static int ata_resources_present(struct pci_dev *pdev, int port)
 
 	/* Check the PCI resources for this channel are enabled */
 	port = port * 2;
-	for (i = 0; i < 2; i ++) {
+	for (i = 0; i < 2; i++) {
 		if (pci_resource_start(pdev, port + i) == 0 ||
 		    pci_resource_len(pdev, port + i) == 0)
 			return 0;
@@ -2598,6 +2697,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
 
 /**
  *	ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
@@ -2615,7 +2715,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
 *	0 on success, -errno otherwise.
 */
 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
-			     const struct ata_port_info * const * ppi,
+			     const struct ata_port_info * const *ppi,
 			     struct ata_host **r_host)
 {
 	struct ata_host *host;
@@ -2645,17 +2745,18 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
 	*r_host = host;
 	return 0;
 
- err_bmdma:
+err_bmdma:
 	/* This is necessary because PCI and iomap resources are
 	 * merged and releasing the top group won't release the
 	 * acquired resources if some of those have been acquired
 	 * before entering this function.
 	 */
 	pcim_iounmap_regions(pdev, 0xf);
- err_out:
+err_out:
 	devres_release_group(&pdev->dev, NULL);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
 
 /**
  *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
@@ -2741,7 +2842,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
 	}
 
 	rc = ata_host_register(host, sht);
- out:
+out:
 	if (rc == 0)
 		devres_remove_group(dev, NULL);
 	else
@@ -2749,6 +2850,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
 
 /**
  *	ata_pci_sff_init_one - Initialize/register PCI IDE host controller
@@ -2776,7 +2878,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
 *	Zero on success, negative on errno-based value on error.
 */
 int ata_pci_sff_init_one(struct pci_dev *pdev,
-			 const struct ata_port_info * const * ppi,
+			 const struct ata_port_info * const *ppi,
 			 struct scsi_host_template *sht, void *host_priv)
 {
 	struct device *dev = &pdev->dev;
@@ -2815,7 +2917,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 	rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
- out:
+out:
 	if (rc == 0)
 		devres_remove_group(&pdev->dev, NULL);
 	else
@@ -2823,54 +2925,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
 
 #endif /* CONFIG_PCI */
 
-EXPORT_SYMBOL_GPL(ata_sff_port_ops);
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-EXPORT_SYMBOL_GPL(ata_sff_dev_select);
-EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
-EXPORT_SYMBOL_GPL(ata_sff_pause);
-EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
-EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
-EXPORT_SYMBOL_GPL(ata_sff_tf_load);
-EXPORT_SYMBOL_GPL(ata_sff_tf_read);
-EXPORT_SYMBOL_GPL(ata_sff_exec_command);
-EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
-EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
-EXPORT_SYMBOL_GPL(ata_sff_irq_on);
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
-EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
-EXPORT_SYMBOL_GPL(ata_sff_interrupt);
-EXPORT_SYMBOL_GPL(ata_sff_freeze);
-EXPORT_SYMBOL_GPL(ata_sff_thaw);
-EXPORT_SYMBOL_GPL(ata_sff_prereset);
-EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
-EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
-EXPORT_SYMBOL_GPL(ata_sff_softreset);
-EXPORT_SYMBOL_GPL(sata_sff_hardreset);
-EXPORT_SYMBOL_GPL(ata_sff_postreset);
-EXPORT_SYMBOL_GPL(ata_sff_error_handler);
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-EXPORT_SYMBOL_GPL(ata_sff_std_ports);
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
-EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
-EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
-EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
-#endif /* CONFIG_PCI */
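Editor's note: the 32-bit PIO support added to libata-sff.c above is opt-in per driver. As a rough sketch (not part of the patch), a hypothetical SFF driver would select it simply by inheriting the new ops table, which is what the ata_piix hunk above and the pata_ali/pata_amd hunks below do; example_set_piomode is a placeholder name, not a real libata symbol.

	/* hypothetical driver opting into 32-bit PIO */
	static struct ata_port_operations example_port_ops = {
		.inherits	= &ata_bmdma32_port_ops,	/* PIO via ata_sff_data_xfer32 */
		.cable_detect	= ata_cable_40wire,
		.set_piomode	= example_set_piomode,		/* placeholder */
	};
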
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 73c466e452ca..a7999c19f0c9 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -19,7 +19,9 @@
  *
  *  TODO/CHECK
  *	Cannot have ATAPI on both master & slave for rev < c2 (???) but
- *	otherwise should do atapi DMA.
+ *	otherwise should do atapi DMA (For now for old we do PIO only for
+ *	ATAPI)
+ *	Review Sunblade workaround.
  */
 
 #include <linux/kernel.h>
@@ -33,12 +35,14 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.5"
+#define DRV_VERSION "0.7.8"
 
 static int ali_atapi_dma = 0;
 module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
 MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
 
+static struct pci_dev *isa_bridge;
+
 /*
  *	Cable special cases
  */
@@ -147,8 +151,7 @@ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
 
 	pci_read_config_byte(pdev, pio_fifo, &fifo);
 	fifo &= ~(0x0F << shift);
-	if (on)
-		fifo |= (on << shift);
+	fifo |= (on << shift);
 	pci_write_config_byte(pdev, pio_fifo, fifo);
 }
 
@@ -337,6 +340,23 @@ static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
 	return 0;
 }
 
+static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
+{
+	u8 r;
+	int port_bit = 4 << link->ap->port_no;
+
+	/* If our bridge is an ALI 1533 then do the extra work */
+	if (isa_bridge) {
+		/* Tristate and re-enable the bus signals */
+		pci_read_config_byte(isa_bridge, 0x58, &r);
+		r &= ~port_bit;
+		pci_write_config_byte(isa_bridge, 0x58, r);
+		r |= port_bit;
+		pci_write_config_byte(isa_bridge, 0x58, r);
+	}
+	ata_sff_postreset(link, classes);
+}
+
 static struct scsi_host_template ali_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
 };
@@ -349,10 +369,11 @@ static struct ata_port_operations ali_early_port_ops = {
 	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= ali_set_piomode,
+	.sff_data_xfer  = ata_sff_data_xfer32,
 };
 
 static const struct ata_port_operations ali_dma_base_ops = {
-	.inherits	= &ata_bmdma_port_ops,
+	.inherits	= &ata_bmdma32_port_ops,
 	.set_piomode	= ali_set_piomode,
 	.set_dmamode	= ali_set_dmamode,
 };
@@ -377,6 +398,17 @@ static struct ata_port_operations ali_c2_port_ops = {
 	.check_atapi_dma = ali_check_atapi_dma,
 	.cable_detect	= ali_c2_cable_detect,
 	.dev_config	= ali_lock_sectors,
+	.postreset	= ali_c2_c3_postreset,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c4_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.cable_detect	= ali_c2_cable_detect,
+	.dev_config	= ali_lock_sectors,
 };
 
 /*
@@ -401,52 +433,49 @@ static struct ata_port_operations ali_c5_port_ops = {
 static void ali_init_chipset(struct pci_dev *pdev)
 {
 	u8 tmp;
-	struct pci_dev *north, *isa_bridge;
+	struct pci_dev *north;
 
 	/*
 	 * The chipset revision selects the driver operations and
 	 * mode data.
 	 */
 
-	if (pdev->revision >= 0x20 && pdev->revision < 0xC2) {
-		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
-		pci_read_config_byte(pdev, 0x4B, &tmp);
-		/* Clear CD-ROM DMA write bit */
-		tmp &= 0x7F;
-		pci_write_config_byte(pdev, 0x4B, tmp);
-	} else if (pdev->revision >= 0xC2) {
-		/* Enable cable detection logic */
+	if (pdev->revision <= 0x20) {
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		tmp |= 0x03;
+		pci_write_config_byte(pdev, 0x53, tmp);
+	} else {
+		pci_read_config_byte(pdev, 0x4a, &tmp);
+		pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
 		pci_read_config_byte(pdev, 0x4B, &tmp);
-		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
-	}
-	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
-	if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
-		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
-		   Set the south bridge enable bit */
-		pci_read_config_byte(isa_bridge, 0x79, &tmp);
-		if (pdev->revision == 0xC2)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
-		else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
-	}
-	if (pdev->revision >= 0x20) {
+		if (pdev->revision < 0xC2)
+			/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+			/* Clear CD-ROM DMA write bit */
+			tmp &= 0x7F;
+		/* Cable and UDMA */
+		pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
 		/*
 		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
 		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
 		 * via 0x54/55.
 		 */
 		pci_read_config_byte(pdev, 0x53, &tmp);
-		if (pdev->revision <= 0x20)
-			tmp &= ~0x02;
 		if (pdev->revision >= 0xc7)
 			tmp |= 0x03;
 		else
 			tmp |= 0x01;	/* CD_ROM enable for DMA */
 		pci_write_config_byte(pdev, 0x53, tmp);
 	}
-	pci_dev_put(isa_bridge);
+	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
+		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
+		   Set the south bridge enable bit */
+		pci_read_config_byte(isa_bridge, 0x79, &tmp);
+		if (pdev->revision == 0xC2)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
+		else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
+	}
 	pci_dev_put(north);
 	ata_pci_bmdma_clear_simplex(pdev);
 }
@@ -503,7 +532,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA5,
-		.port_ops = &ali_c2_port_ops
+		.port_ops = &ali_c4_port_ops
 	};
 	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
 	static const struct ata_port_info info_c5 = {
@@ -516,7 +545,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	u8 tmp;
-	struct pci_dev *isa_bridge;
 	int rc;
 
 	rc = pcim_enable_device(pdev);
@@ -543,14 +571,12 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ali_init_chipset(pdev);
 
-	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 	if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
 		/* Are we paired with a UDMA capable chip */
 		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
 		if ((tmp & 0x1E) == 0x12)
 			ppi[0] = &info_20_udma;
 	}
-	pci_dev_put(isa_bridge);
 
 	return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
 }
@@ -590,13 +616,20 @@ static struct pci_driver ali_pci_driver = {
 
 static int __init ali_init(void)
 {
-	return pci_register_driver(&ali_pci_driver);
+	int ret;
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+	ret = pci_register_driver(&ali_pci_driver);
+	if (ret < 0)
+		pci_dev_put(isa_bridge);
+	return ret;
 }
 
 
 static void __exit ali_exit(void)
 {
 	pci_unregister_driver(&ali_pci_driver);
+	pci_dev_put(isa_bridge);
 }
 
 
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 0ec9c7d9fe9d..63719ab9ea44 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.10"
+#define DRV_VERSION "0.3.11"
 
 /**
  *	timing_setup	-	shared timing computation and load
@@ -345,7 +345,7 @@ static struct scsi_host_template amd_sht = {
 };
 
 static const struct ata_port_operations amd_base_port_ops = {
-	.inherits	= &ata_bmdma_port_ops,
+	.inherits	= &ata_bmdma32_port_ops,
 	.prereset	= amd_pre_reset,
 };
 
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index e0c4f05d7d57..65c28e5a6cd7 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define DRV_VERSION "0.6.2" | 30 | #define DRV_VERSION "0.6.2" |
31 | 31 | ||
32 | struct hpt_clock { | 32 | struct hpt_clock { |
33 | u8 xfer_speed; | 33 | u8 xfer_mode; |
34 | u32 timing; | 34 | u32 timing; |
35 | }; | 35 | }; |
36 | 36 | ||
@@ -189,28 +189,6 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
189 | return ata_bmdma_mode_filter(adev, mask); | 189 | return ata_bmdma_mode_filter(adev, mask); |
190 | } | 190 | } |
191 | 191 | ||
192 | /** | ||
193 | * hpt36x_find_mode - reset the hpt36x bus | ||
194 | * @ap: ATA port | ||
195 | * @speed: transfer mode | ||
196 | * | ||
197 | * Return the 32bit register programming information for this channel | ||
198 | * that matches the speed provided. | ||
199 | */ | ||
200 | |||
201 | static u32 hpt36x_find_mode(struct ata_port *ap, int speed) | ||
202 | { | ||
203 | struct hpt_clock *clocks = ap->host->private_data; | ||
204 | |||
205 | while(clocks->xfer_speed) { | ||
206 | if (clocks->xfer_speed == speed) | ||
207 | return clocks->timing; | ||
208 | clocks++; | ||
209 | } | ||
210 | BUG(); | ||
211 | return 0xffffffffU; /* silence compiler warning */ | ||
212 | } | ||
213 | |||
214 | static int hpt36x_cable_detect(struct ata_port *ap) | 192 | static int hpt36x_cable_detect(struct ata_port *ap) |
215 | { | 193 | { |
216 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 194 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
@@ -226,25 +204,16 @@ static int hpt36x_cable_detect(struct ata_port *ap) | |||
226 | return ATA_CBL_PATA80; | 204 | return ATA_CBL_PATA80; |
227 | } | 205 | } |
228 | 206 | ||
229 | /** | 207 | static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev, |
230 | * hpt366_set_piomode - PIO setup | 208 | u8 mode) |
231 | * @ap: ATA interface | ||
232 | * @adev: device on the interface | ||
233 | * | ||
234 | * Perform PIO mode setup. | ||
235 | */ | ||
236 | |||
237 | static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
238 | { | 209 | { |
210 | struct hpt_clock *clocks = ap->host->private_data; | ||
239 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 211 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
240 | u32 addr1, addr2; | 212 | u32 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); |
241 | u32 reg; | 213 | u32 addr2 = 0x51 + 4 * ap->port_no; |
242 | u32 mode; | 214 | u32 mask, reg; |
243 | u8 fast; | 215 | u8 fast; |
244 | 216 | ||
245 | addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); | ||
246 | addr2 = 0x51 + 4 * ap->port_no; | ||
247 | |||
248 | /* Fast interrupt prediction disable, hold off interrupt disable */ | 217 | /* Fast interrupt prediction disable, hold off interrupt disable */ |
249 | pci_read_config_byte(pdev, addr2, &fast); | 218 | pci_read_config_byte(pdev, addr2, &fast); |
250 | if (fast & 0x80) { | 219 | if (fast & 0x80) { |
@@ -252,12 +221,43 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
252 | pci_write_config_byte(pdev, addr2, fast); | 221 | pci_write_config_byte(pdev, addr2, fast); |
253 | } | 222 | } |
254 | 223 | ||
224 | /* determine timing mask and find matching clock entry */ | ||
225 | if (mode < XFER_MW_DMA_0) | ||
226 | mask = 0xc1f8ffff; | ||
227 | else if (mode < XFER_UDMA_0) | ||
228 | mask = 0x303800ff; | ||
229 | else | ||
230 | mask = 0x30070000; | ||
231 | |||
232 | while (clocks->xfer_mode) { | ||
233 | if (clocks->xfer_mode == mode) | ||
234 | break; | ||
235 | clocks++; | ||
236 | } | ||
237 | if (!clocks->xfer_mode) | ||
238 | BUG(); | ||
239 | |||
240 | /* | ||
241 | * Combine new mode bits with old config bits and disable | ||
242 | * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid | ||
243 | * problems handling I/O errors later. | ||
244 | */ | ||
255 | pci_read_config_dword(pdev, addr1, ®); | 245 | pci_read_config_dword(pdev, addr1, ®); |
256 | mode = hpt36x_find_mode(ap, adev->pio_mode); | 246 | reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000; |
257 | mode &= ~0x8000000; /* No FIFO in PIO */ | 247 | pci_write_config_dword(pdev, addr1, reg); |
258 | mode &= ~0x30070000; /* Leave config bits alone */ | 248 | } |
259 | reg &= 0x30070000; /* Strip timing bits */ | 249 | |
260 | pci_write_config_dword(pdev, addr1, reg | mode); | 250 | /** |
251 | * hpt366_set_piomode - PIO setup | ||
252 | * @ap: ATA interface | ||
253 | * @adev: device on the interface | ||
254 | * | ||
255 | * Perform PIO mode setup. | ||
256 | */ | ||
257 | |||
258 | static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
259 | { | ||
260 | hpt366_set_mode(ap, adev, adev->pio_mode); | ||
261 | } | 261 | } |
262 | 262 | ||
263 | /** | 263 | /** |
@@ -271,28 +271,7 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
271 | 271 | ||
272 | static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) | 272 | static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) |
273 | { | 273 | { |
274 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 274 | hpt366_set_mode(ap, adev, adev->dma_mode); |
275 | u32 addr1, addr2; | ||
276 | u32 reg; | ||
277 | u32 mode; | ||
278 | u8 fast; | ||
279 | |||
280 | addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); | ||
281 | addr2 = 0x51 + 4 * ap->port_no; | ||
282 | |||
283 | /* Fast interrupt prediction disable, hold off interrupt disable */ | ||
284 | pci_read_config_byte(pdev, addr2, &fast); | ||
285 | if (fast & 0x80) { | ||
286 | fast &= ~0x80; | ||
287 | pci_write_config_byte(pdev, addr2, fast); | ||
288 | } | ||
289 | |||
290 | pci_read_config_dword(pdev, addr1, ®); | ||
291 | mode = hpt36x_find_mode(ap, adev->dma_mode); | ||
292 | mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ | ||
293 | mode &= ~0xC0000000; /* Leave config bits alone */ | ||
294 | reg &= 0xC0000000; /* Strip timing bits */ | ||
295 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
296 | } | 275 | } |
297 | 276 | ||
298 | static struct scsi_host_template hpt36x_sht = { | 277 | static struct scsi_host_template hpt36x_sht = { |
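The pata_hpt366 rework above folds the separate PIO and DMA setup paths into one hpt366_set_mode() that picks a register mask from the transfer-mode class, walks the zero-terminated clock table, and merges the matching timing word into the preserved bits. The following compilable sketch shows that lookup-and-merge step; the mode codes, timing words and starting register value are invented, and only the three masks and the merge expression mirror the hunk.

#include <stdint.h>
#include <stdio.h>

/* Invented mode codes standing in for XFER_PIO_0, XFER_MW_DMA_0 and XFER_UDMA_0. */
enum { MODE_PIO_0 = 0x08, MODE_MWDMA_0 = 0x20, MODE_UDMA_0 = 0x40 };

struct clock_entry { uint8_t xfer_mode; uint32_t timing; };

/* Zero-terminated timing table, like the driver's hpt_clock arrays (values invented). */
static const struct clock_entry clocks[] = {
	{ MODE_PIO_0,   0xc0d08585 },
	{ MODE_MWDMA_0, 0x20c08585 },
	{ MODE_UDMA_0,  0x10c98585 },
	{ 0, 0 }
};

static uint32_t apply_mode(uint32_t reg, uint8_t mode)
{
	const struct clock_entry *c = clocks;
	uint32_t mask;

	/* Which register bits this class of mode is allowed to touch. */
	if (mode < MODE_MWDMA_0)
		mask = 0xc1f8ffff;      /* PIO timing bits */
	else if (mode < MODE_UDMA_0)
		mask = 0x303800ff;      /* MWDMA timing bits */
	else
		mask = 0x30070000;      /* UDMA timing bits */

	/* Walk the table until the entry for this mode is found. */
	while (c->xfer_mode && c->xfer_mode != mode)
		c++;
	if (!c->xfer_mode)
		return reg;             /* unknown mode: leave the register alone */

	/* Merge the new timing into the preserved bits, drop the FIFO bits. */
	return ((reg & ~mask) | (c->timing & mask)) & ~0xc0000000;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)apply_mode(0x12345678u, MODE_UDMA_0));
	return 0;
}

Sharing one helper means the FIFO-disable and config-bit handling can no longer drift apart between the PIO and DMA paths.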
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index f11a320337c0..f19cc645881a 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/libata.h> | 23 | #include <linux/libata.h> |
24 | 24 | ||
25 | #define DRV_NAME "pata_hpt3x3" | 25 | #define DRV_NAME "pata_hpt3x3" |
26 | #define DRV_VERSION "0.5.3" | 26 | #define DRV_VERSION "0.6.1" |
27 | 27 | ||
28 | /** | 28 | /** |
29 | * hpt3x3_set_piomode - PIO setup | 29 | * hpt3x3_set_piomode - PIO setup |
@@ -80,14 +80,48 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
80 | r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ | 80 | r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ |
81 | 81 | ||
82 | if (adev->dma_mode >= XFER_UDMA_0) | 82 | if (adev->dma_mode >= XFER_UDMA_0) |
83 | r2 |= (0x10 << dn); /* Ultra mode */ | 83 | r2 |= (0x01 << dn); /* Ultra mode */ |
84 | else | 84 | else |
85 | r2 |= (0x01 << dn); /* MWDMA */ | 85 | r2 |= (0x10 << dn); /* MWDMA */ |
86 | 86 | ||
87 | pci_write_config_dword(pdev, 0x44, r1); | 87 | pci_write_config_dword(pdev, 0x44, r1); |
88 | pci_write_config_dword(pdev, 0x48, r2); | 88 | pci_write_config_dword(pdev, 0x48, r2); |
89 | } | 89 | } |
90 | #endif /* CONFIG_PATA_HPT3X3_DMA */ | 90 | |
91 | /** | ||
92 | * hpt3x3_freeze - DMA workaround | ||
93 | * @ap: port to freeze | ||
94 | * | ||
95 | * When freezing an HPT3x3 we must stop any pending DMA before | ||
96 | * writing to the control register or the chip will hang | ||
97 | */ | ||
98 | |||
99 | static void hpt3x3_freeze(struct ata_port *ap) | ||
100 | { | ||
101 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
102 | |||
103 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ ATA_DMA_START, | ||
104 | mmio + ATA_DMA_CMD); | ||
105 | ata_sff_dma_pause(ap); | ||
106 | ata_sff_freeze(ap); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * hpt3x3_bmdma_setup - DMA workaround | ||
111 | * @qc: Queued command | ||
112 | * | ||
113 | * When issuing BMDMA we must clean up the error/active bits in | ||
114 | * software on this device | ||
115 | */ | ||
116 | |||
117 | static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc) | ||
118 | { | ||
119 | struct ata_port *ap = qc->ap; | ||
120 | u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
121 | r |= ATA_DMA_INTR | ATA_DMA_ERR; | ||
122 | iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
123 | return ata_bmdma_setup(qc); | ||
124 | } | ||
91 | 125 | ||
92 | /** | 126 | /** |
93 | * hpt3x3_atapi_dma - ATAPI DMA check | 127 | * hpt3x3_atapi_dma - ATAPI DMA check |
@@ -101,18 +135,23 @@ static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) | |||
101 | return 1; | 135 | return 1; |
102 | } | 136 | } |
103 | 137 | ||
138 | #endif /* CONFIG_PATA_HPT3X3_DMA */ | ||
139 | |||
104 | static struct scsi_host_template hpt3x3_sht = { | 140 | static struct scsi_host_template hpt3x3_sht = { |
105 | ATA_BMDMA_SHT(DRV_NAME), | 141 | ATA_BMDMA_SHT(DRV_NAME), |
106 | }; | 142 | }; |
107 | 143 | ||
108 | static struct ata_port_operations hpt3x3_port_ops = { | 144 | static struct ata_port_operations hpt3x3_port_ops = { |
109 | .inherits = &ata_bmdma_port_ops, | 145 | .inherits = &ata_bmdma_port_ops, |
110 | .check_atapi_dma= hpt3x3_atapi_dma, | ||
111 | .cable_detect = ata_cable_40wire, | 146 | .cable_detect = ata_cable_40wire, |
112 | .set_piomode = hpt3x3_set_piomode, | 147 | .set_piomode = hpt3x3_set_piomode, |
113 | #if defined(CONFIG_PATA_HPT3X3_DMA) | 148 | #if defined(CONFIG_PATA_HPT3X3_DMA) |
114 | .set_dmamode = hpt3x3_set_dmamode, | 149 | .set_dmamode = hpt3x3_set_dmamode, |
150 | .bmdma_setup = hpt3x3_bmdma_setup, | ||
151 | .check_atapi_dma= hpt3x3_atapi_dma, | ||
152 | .freeze = hpt3x3_freeze, | ||
115 | #endif | 153 | #endif |
154 | |||
116 | }; | 155 | }; |
117 | 156 | ||
118 | /** | 157 | /** |
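The new hpt3x3_bmdma_setup() above clears the interrupt and error bits of the BMDMA status register before a transfer starts; on this class of hardware those bits are write-1-to-clear, so setting them in the value written back is what clears them. Here is a small userspace sketch of that convention, with the register simulated by a variable and bit positions chosen to match the usual ATA BMDMA layout (an assumption for illustration, not taken from the hunk).

#include <stdint.h>
#include <stdio.h>

#define DMA_ERR   0x02
#define DMA_INTR  0x04

/* Simulated write-1-to-clear status register: writing a 1 clears the bit,
 * writing a 0 leaves it untouched. */
static uint8_t bmdma_status = DMA_ERR | DMA_INTR;  /* stale bits from an earlier transfer */

static void write_status(uint8_t val)
{
	bmdma_status &= (uint8_t)~(val & (DMA_ERR | DMA_INTR));
}

int main(void)
{
	/* Same read-set-write sequence as the new hpt3x3_bmdma_setup(). */
	uint8_t r = bmdma_status;

	r |= DMA_INTR | DMA_ERR;
	write_status(r);

	printf("status after clear: 0x%02x\n", bmdma_status);  /* prints 0x00 */
	return 0;
}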
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 7c8faa48b5f3..aa576cac4d17 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/libata.h> | 35 | #include <linux/libata.h> |
36 | 36 | ||
37 | #define DRV_NAME "pata_mpiix" | 37 | #define DRV_NAME "pata_mpiix" |
38 | #define DRV_VERSION "0.7.6" | 38 | #define DRV_VERSION "0.7.7" |
39 | 39 | ||
40 | enum { | 40 | enum { |
41 | IDETIM = 0x6C, /* IDE control register */ | 41 | IDETIM = 0x6C, /* IDE control register */ |
@@ -146,6 +146,7 @@ static struct ata_port_operations mpiix_port_ops = { | |||
146 | .cable_detect = ata_cable_40wire, | 146 | .cable_detect = ata_cable_40wire, |
147 | .set_piomode = mpiix_set_piomode, | 147 | .set_piomode = mpiix_set_piomode, |
148 | .prereset = mpiix_pre_reset, | 148 | .prereset = mpiix_pre_reset, |
149 | .sff_data_xfer = ata_sff_data_xfer32, | ||
149 | }; | 150 | }; |
150 | 151 | ||
151 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 152 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 6afa07a37648..d8d743af3225 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c | |||
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__pata_platform_probe); | |||
186 | * A platform bus ATA device has been unplugged. Perform the needed | 186 | * A platform bus ATA device has been unplugged. Perform the needed |
187 | * cleanup. Also called on module unload for any active devices. | 187 | * cleanup. Also called on module unload for any active devices. |
188 | */ | 188 | */ |
189 | int __devexit __pata_platform_remove(struct device *dev) | 189 | int __pata_platform_remove(struct device *dev) |
190 | { | 190 | { |
191 | struct ata_host *host = dev_get_drvdata(dev); | 191 | struct ata_host *host = dev_get_drvdata(dev); |
192 | 192 | ||
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 83580a59db58..9e764e5747e6 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/libata.h> | 32 | #include <linux/libata.h> |
33 | 33 | ||
34 | #define DRV_NAME "pata_sil680" | 34 | #define DRV_NAME "pata_sil680" |
35 | #define DRV_VERSION "0.4.8" | 35 | #define DRV_VERSION "0.4.9" |
36 | 36 | ||
37 | #define SIL680_MMIO_BAR 5 | 37 | #define SIL680_MMIO_BAR 5 |
38 | 38 | ||
@@ -195,7 +195,7 @@ static struct scsi_host_template sil680_sht = { | |||
195 | }; | 195 | }; |
196 | 196 | ||
197 | static struct ata_port_operations sil680_port_ops = { | 197 | static struct ata_port_operations sil680_port_ops = { |
198 | .inherits = &ata_bmdma_port_ops, | 198 | .inherits = &ata_bmdma32_port_ops, |
199 | .cable_detect = sil680_cable_detect, | 199 | .cable_detect = sil680_cable_detect, |
200 | .set_piomode = sil680_set_piomode, | 200 | .set_piomode = sil680_set_piomode, |
201 | .set_dmamode = sil680_set_dmamode, | 201 | .set_dmamode = sil680_set_dmamode, |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ccee930f1e12..2590c2279fa7 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -51,13 +51,6 @@ struct sil24_sge { | |||
51 | __le32 flags; | 51 | __le32 flags; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* | ||
55 | * Port multiplier | ||
56 | */ | ||
57 | struct sil24_port_multiplier { | ||
58 | __le32 diag; | ||
59 | __le32 sactive; | ||
60 | }; | ||
61 | 54 | ||
62 | enum { | 55 | enum { |
63 | SIL24_HOST_BAR = 0, | 56 | SIL24_HOST_BAR = 0, |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 088885ed51b9..e1c7611e9144 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
@@ -64,7 +64,7 @@ | |||
64 | #include <linux/jiffies.h> | 64 | #include <linux/jiffies.h> |
65 | #include "iphase.h" | 65 | #include "iphase.h" |
66 | #include "suni.h" | 66 | #include "suni.h" |
67 | #define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) | 67 | #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) |
68 | 68 | ||
69 | #define PRIV(dev) ((struct suni_priv *) dev->phy_data) | 69 | #define PRIV(dev) ((struct suni_priv *) dev->phy_data) |
70 | 70 | ||
@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev) | |||
1306 | // get real pkt length pwang_test | 1306 | // get real pkt length pwang_test |
1307 | trailer = (struct cpcs_trailer*)((u_char *)skb->data + | 1307 | trailer = (struct cpcs_trailer*)((u_char *)skb->data + |
1308 | skb->len - sizeof(*trailer)); | 1308 | skb->len - sizeof(*trailer)); |
1309 | length = swap(trailer->length); | 1309 | length = swap_byte_order(trailer->length); |
1310 | if ((length > iadev->rx_buf_sz) || (length > | 1310 | if ((length > iadev->rx_buf_sz) || (length > |
1311 | (skb->len - sizeof(struct cpcs_trailer)))) | 1311 | (skb->len - sizeof(struct cpcs_trailer)))) |
1312 | { | 1312 | { |
@@ -2995,7 +2995,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { | |||
2995 | skb->len, PCI_DMA_TODEVICE); | 2995 | skb->len, PCI_DMA_TODEVICE); |
2996 | wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | | 2996 | wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | |
2997 | buf_desc_ptr->buf_start_lo; | 2997 | buf_desc_ptr->buf_start_lo; |
2998 | /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */ | 2998 | /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */ |
2999 | wr_ptr->bytes = skb->len; | 2999 | wr_ptr->bytes = skb->len; |
3000 | 3000 | ||
3001 | /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */ | 3001 | /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */ |
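The iphase.c change above only renames the driver-local swap() macro to swap_byte_order() so it no longer shadows the generic kernel swap() helper; the operation itself, exchanging the two bytes of a 16-bit value, is unchanged. A standalone sketch of that operation, written as a function so the argument is evaluated exactly once (the macro form evaluates x twice):

#include <stdint.h>
#include <stdio.h>

/* Swap the high and low bytes of a 16-bit value. */
static uint16_t swap_byte_order(uint16_t x)
{
	return (uint16_t)(((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8));
}

int main(void)
{
	printf("0x%04x\n", swap_byte_order(0x1234));  /* prints 0x3412 */
	return 0;
}

Renaming rather than reworking the macro keeps the generated code identical while freeing the swap identifier for the generic helper.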
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 35914b6e1d2a..f5be8081cd81 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -616,6 +616,7 @@ config HVC_ISERIES | |||
616 | default y | 616 | default y |
617 | select HVC_DRIVER | 617 | select HVC_DRIVER |
618 | select HVC_IRQ | 618 | select HVC_IRQ |
619 | select VIOPATH | ||
619 | help | 620 | help |
620 | iSeries machines support a hypervisor virtual console. | 621 | iSeries machines support a hypervisor virtual console. |
621 | 622 | ||
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c index 91cdb35a9204..0afc8b82212e 100644 --- a/drivers/char/hvc_beat.c +++ b/drivers/char/hvc_beat.c | |||
@@ -44,7 +44,7 @@ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt) | |||
44 | static unsigned char q[sizeof(unsigned long) * 2] | 44 | static unsigned char q[sizeof(unsigned long) * 2] |
45 | __attribute__((aligned(sizeof(unsigned long)))); | 45 | __attribute__((aligned(sizeof(unsigned long)))); |
46 | static int qlen = 0; | 46 | static int qlen = 0; |
47 | unsigned long got; | 47 | u64 got; |
48 | 48 | ||
49 | again: | 49 | again: |
50 | if (qlen) { | 50 | if (qlen) { |
@@ -63,7 +63,7 @@ again: | |||
63 | } | 63 | } |
64 | } | 64 | } |
65 | if (beat_get_term_char(vtermno, &got, | 65 | if (beat_get_term_char(vtermno, &got, |
66 | ((unsigned long *)q), ((unsigned long *)q) + 1) == 0) { | 66 | ((u64 *)q), ((u64 *)q) + 1) == 0) { |
67 | qlen = got; | 67 | qlen = got; |
68 | goto again; | 68 | goto again; |
69 | } | 69 | } |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 112a6ba9a96f..146c97613da0 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | /* These are global because they are accessed in tty_io.c */ | 33 | /* These are global because they are accessed in tty_io.c */ |
34 | #ifdef CONFIG_UNIX98_PTYS | 34 | #ifdef CONFIG_UNIX98_PTYS |
35 | struct tty_driver *ptm_driver; | 35 | static struct tty_driver *ptm_driver; |
36 | static struct tty_driver *pts_driver; | 36 | static struct tty_driver *pts_driver; |
37 | #endif | 37 | #endif |
38 | 38 | ||
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index ab18c1e7b115..70efba2ee053 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -273,12 +273,23 @@ static void tpm_nsc_remove(struct device *dev) | |||
273 | } | 273 | } |
274 | } | 274 | } |
275 | 275 | ||
276 | static struct device_driver nsc_drv = { | 276 | static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg) |
277 | .name = "tpm_nsc", | 277 | { |
278 | .bus = &platform_bus_type, | 278 | return tpm_pm_suspend(&dev->dev, msg); |
279 | .owner = THIS_MODULE, | 279 | } |
280 | .suspend = tpm_pm_suspend, | 280 | |
281 | .resume = tpm_pm_resume, | 281 | static int tpm_nsc_resume(struct platform_device *dev) |
282 | { | ||
283 | return tpm_pm_resume(&dev->dev); | ||
284 | } | ||
285 | |||
286 | static struct platform_driver nsc_drv = { | ||
287 | .suspend = tpm_nsc_suspend, | ||
288 | .resume = tpm_nsc_resume, | ||
289 | .driver = { | ||
290 | .name = "tpm_nsc", | ||
291 | .owner = THIS_MODULE, | ||
292 | }, | ||
282 | }; | 293 | }; |
283 | 294 | ||
284 | static int __init init_nsc(void) | 295 | static int __init init_nsc(void) |
@@ -297,7 +308,7 @@ static int __init init_nsc(void) | |||
297 | return -ENODEV; | 308 | return -ENODEV; |
298 | } | 309 | } |
299 | 310 | ||
300 | err = driver_register(&nsc_drv); | 311 | err = platform_driver_register(&nsc_drv); |
301 | if (err) | 312 | if (err) |
302 | return err; | 313 | return err; |
303 | 314 | ||
@@ -308,17 +319,15 @@ static int __init init_nsc(void) | |||
308 | /* enable the DPM module */ | 319 | /* enable the DPM module */ |
309 | tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); | 320 | tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); |
310 | 321 | ||
311 | pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); | 322 | pdev = platform_device_alloc("tpm_nscl0", -1); |
312 | if (!pdev) { | 323 | if (!pdev) { |
313 | rc = -ENOMEM; | 324 | rc = -ENOMEM; |
314 | goto err_unreg_drv; | 325 | goto err_unreg_drv; |
315 | } | 326 | } |
316 | 327 | ||
317 | pdev->name = "tpm_nscl0"; | ||
318 | pdev->id = -1; | ||
319 | pdev->num_resources = 0; | 328 | pdev->num_resources = 0; |
329 | pdev->dev.driver = &nsc_drv.driver; | ||
320 | pdev->dev.release = tpm_nsc_remove; | 330 | pdev->dev.release = tpm_nsc_remove; |
321 | pdev->dev.driver = &nsc_drv; | ||
322 | 331 | ||
323 | if ((rc = platform_device_register(pdev)) < 0) | 332 | if ((rc = platform_device_register(pdev)) < 0) |
324 | goto err_free_dev; | 333 | goto err_free_dev; |
@@ -377,7 +386,7 @@ err_unreg_dev: | |||
377 | err_free_dev: | 386 | err_free_dev: |
378 | kfree(pdev); | 387 | kfree(pdev); |
379 | err_unreg_drv: | 388 | err_unreg_drv: |
380 | driver_unregister(&nsc_drv); | 389 | platform_driver_unregister(&nsc_drv); |
381 | return rc; | 390 | return rc; |
382 | } | 391 | } |
383 | 392 | ||
@@ -390,7 +399,7 @@ static void __exit cleanup_nsc(void) | |||
390 | pdev = NULL; | 399 | pdev = NULL; |
391 | } | 400 | } |
392 | 401 | ||
393 | driver_unregister(&nsc_drv); | 402 | platform_driver_unregister(&nsc_drv); |
394 | } | 403 | } |
395 | 404 | ||
396 | module_init(init_nsc); | 405 | module_init(init_nsc); |
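The tpm_nsc conversion above registers a platform_driver whose suspend/resume callbacks take a struct platform_device *, while the existing tpm_pm_suspend()/tpm_pm_resume() helpers take a struct device *; tpm_nsc_suspend()/tpm_nsc_resume() are thin adapters that forward the embedded device. The sketch below shows that adapter pattern with simplified stand-in types and the pm_message_t argument dropped; none of the names are the real kernel structures.

#include <stdio.h>

/* Simplified stand-ins for struct device and struct platform_device. */
struct device { const char *name; };
struct platform_device { struct device dev; };

/* Existing helpers that only understand the inner struct device. */
static int core_suspend(struct device *d) { printf("suspend %s\n", d->name); return 0; }
static int core_resume(struct device *d)  { printf("resume %s\n",  d->name); return 0; }

/* Bus-level callbacks receive the wrapper type, so thin adapters just
 * forward the embedded struct device. */
static int bus_suspend(struct platform_device *pdev) { return core_suspend(&pdev->dev); }
static int bus_resume(struct platform_device *pdev)  { return core_resume(&pdev->dev); }

int main(void)
{
	struct platform_device pdev = { { "tpm_nscl0" } };

	bus_suspend(&pdev);
	bus_resume(&pdev);
	return 0;
}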
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 80014213fb53..7900bd63b36d 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -969,8 +969,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) | |||
969 | * Takes the console sem and the called methods then take the tty | 969 | * Takes the console sem and the called methods then take the tty |
970 | * termios_mutex and the tty ctrl_lock in that order. | 970 | * termios_mutex and the tty ctrl_lock in that order. |
971 | */ | 971 | */ |
972 | 972 | static int vt_resize(struct tty_struct *tty, struct winsize *ws) | |
973 | int vt_resize(struct tty_struct *tty, struct winsize *ws) | ||
974 | { | 973 | { |
975 | struct vc_data *vc = tty->driver_data; | 974 | struct vc_data *vc = tty->driver_data; |
976 | int ret; | 975 | int ret; |
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 50a071f1c945..777fba48d2d3 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -238,11 +238,11 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /** | 240 | /** |
241 | * smi_request: generate SMI request | 241 | * dcdbas_smi_request: generate SMI request |
242 | * | 242 | * |
243 | * Called with smi_data_lock. | 243 | * Called with smi_data_lock. |
244 | */ | 244 | */ |
245 | static int smi_request(struct smi_cmd *smi_cmd) | 245 | int dcdbas_smi_request(struct smi_cmd *smi_cmd) |
246 | { | 246 | { |
247 | cpumask_t old_mask; | 247 | cpumask_t old_mask; |
248 | int ret = 0; | 248 | int ret = 0; |
@@ -309,14 +309,14 @@ static ssize_t smi_request_store(struct device *dev, | |||
309 | switch (val) { | 309 | switch (val) { |
310 | case 2: | 310 | case 2: |
311 | /* Raw SMI */ | 311 | /* Raw SMI */ |
312 | ret = smi_request(smi_cmd); | 312 | ret = dcdbas_smi_request(smi_cmd); |
313 | if (!ret) | 313 | if (!ret) |
314 | ret = count; | 314 | ret = count; |
315 | break; | 315 | break; |
316 | case 1: | 316 | case 1: |
317 | /* Calling Interface SMI */ | 317 | /* Calling Interface SMI */ |
318 | smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); | 318 | smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); |
319 | ret = smi_request(smi_cmd); | 319 | ret = dcdbas_smi_request(smi_cmd); |
320 | if (!ret) | 320 | if (!ret) |
321 | ret = count; | 321 | ret = count; |
322 | break; | 322 | break; |
@@ -333,6 +333,7 @@ out: | |||
333 | mutex_unlock(&smi_data_lock); | 333 | mutex_unlock(&smi_data_lock); |
334 | return ret; | 334 | return ret; |
335 | } | 335 | } |
336 | EXPORT_SYMBOL(dcdbas_smi_request); | ||
336 | 337 | ||
337 | /** | 338 | /** |
338 | * host_control_smi: generate host control SMI | 339 | * host_control_smi: generate host control SMI |
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h index 87bc3417de27..ca3cb0a54ab6 100644 --- a/drivers/firmware/dcdbas.h +++ b/drivers/firmware/dcdbas.h | |||
@@ -101,5 +101,7 @@ struct apm_cmd { | |||
101 | } __attribute__ ((packed)) parameters; | 101 | } __attribute__ ((packed)) parameters; |
102 | } __attribute__ ((packed)); | 102 | } __attribute__ ((packed)); |
103 | 103 | ||
104 | int dcdbas_smi_request(struct smi_cmd *smi_cmd); | ||
105 | |||
104 | #endif /* _DCDBAS_H_ */ | 106 | #endif /* _DCDBAS_H_ */ |
105 | 107 | ||
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 3bf8ee120d42..261b9aa3f248 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c | |||
@@ -56,9 +56,9 @@ struct memmap_attribute { | |||
56 | ssize_t (*show)(struct firmware_map_entry *entry, char *buf); | 56 | ssize_t (*show)(struct firmware_map_entry *entry, char *buf); |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct memmap_attribute memmap_start_attr = __ATTR_RO(start); | 59 | static struct memmap_attribute memmap_start_attr = __ATTR_RO(start); |
60 | struct memmap_attribute memmap_end_attr = __ATTR_RO(end); | 60 | static struct memmap_attribute memmap_end_attr = __ATTR_RO(end); |
61 | struct memmap_attribute memmap_type_attr = __ATTR_RO(type); | 61 | static struct memmap_attribute memmap_type_attr = __ATTR_RO(type); |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * These are default attributes that are added for every memmap entry. | 64 | * These are default attributes that are added for every memmap entry. |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index a812db243477..6ba57e91d7ab 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -2705,7 +2705,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2705 | sizeof(struct ietf_mpa_frame)); | 2705 | sizeof(struct ietf_mpa_frame)); |
2706 | 2706 | ||
2707 | 2707 | ||
2708 | /* notify OF layer that accept event was successfull */ | 2708 | /* notify OF layer that accept event was successful */ |
2709 | cm_id->add_ref(cm_id); | 2709 | cm_id->add_ref(cm_id); |
2710 | 2710 | ||
2711 | cm_event.event = IW_CM_EVENT_ESTABLISHED; | 2711 | cm_event.event = IW_CM_EVENT_ESTABLISHED; |
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c index a0f45c4fc198..d297accf9a7f 100644 --- a/drivers/input/mouse/pxa930_trkball.c +++ b/drivers/input/mouse/pxa930_trkball.c | |||
@@ -186,7 +186,7 @@ static int __devinit pxa930_trkball_probe(struct platform_device *pdev) | |||
186 | error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED, | 186 | error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED, |
187 | pdev->name, trkball); | 187 | pdev->name, trkball); |
188 | if (error) { | 188 | if (error) { |
189 | dev_err(&pdev->dev, "failed to request irq: %d\n", ret); | 189 | dev_err(&pdev->dev, "failed to request irq: %d\n", error); |
190 | goto failed_free_io; | 190 | goto failed_free_io; |
191 | } | 191 | } |
192 | 192 | ||
@@ -227,7 +227,7 @@ failed_free_io: | |||
227 | iounmap(trkball->mmio_base); | 227 | iounmap(trkball->mmio_base); |
228 | failed: | 228 | failed: |
229 | kfree(trkball); | 229 | kfree(trkball); |
230 | return ret; | 230 | return error; |
231 | } | 231 | } |
232 | 232 | ||
233 | static int __devexit pxa930_trkball_remove(struct platform_device *pdev) | 233 | static int __devexit pxa930_trkball_remove(struct platform_device *pdev) |
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h index 016410cf2273..8ea587783e14 100644 --- a/drivers/isdn/hardware/eicon/debuglib.h +++ b/drivers/isdn/hardware/eicon/debuglib.h | |||
@@ -235,7 +235,7 @@ typedef void ( * DbgOld) (unsigned short, char *, va_list) ; | |||
235 | typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ; | 235 | typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ; |
236 | typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ; | 236 | typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ; |
237 | typedef struct _DbgHandle_ | 237 | typedef struct _DbgHandle_ |
238 | { char Registered ; /* driver successfull registered */ | 238 | { char Registered ; /* driver successfully registered */ |
239 | #define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */ | 239 | #define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */ |
240 | #define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */ | 240 | #define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */ |
241 | char Version; /* version of this structure */ | 241 | char Version; /* version of this structure */ |
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c index 7b4ec3f60dbf..c964b8d91ada 100644 --- a/drivers/isdn/hardware/eicon/os_4bri.c +++ b/drivers/isdn/hardware/eicon/os_4bri.c | |||
@@ -997,7 +997,7 @@ diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter, | |||
997 | diva_xdi_display_adapter_features(IoAdapter->ANum); | 997 | diva_xdi_display_adapter_features(IoAdapter->ANum); |
998 | 998 | ||
999 | for (i = 0; i < IoAdapter->tasks; i++) { | 999 | for (i = 0; i < IoAdapter->tasks; i++) { |
1000 | DBG_LOG(("A(%d) %s adapter successfull started", | 1000 | DBG_LOG(("A(%d) %s adapter successfully started", |
1001 | IoAdapter->QuadroList->QuadroAdapter[i]->ANum, | 1001 | IoAdapter->QuadroList->QuadroAdapter[i]->ANum, |
1002 | (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI")) | 1002 | (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI")) |
1003 | diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); | 1003 | diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); |
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c index f31bba5b16ff..08f01993f46b 100644 --- a/drivers/isdn/hardware/eicon/os_bri.c +++ b/drivers/isdn/hardware/eicon/os_bri.c | |||
@@ -736,7 +736,7 @@ diva_bri_start_adapter(PISDN_ADAPTER IoAdapter, | |||
736 | 736 | ||
737 | IoAdapter->Properties.Features = (word) features; | 737 | IoAdapter->Properties.Features = (word) features; |
738 | diva_xdi_display_adapter_features(IoAdapter->ANum); | 738 | diva_xdi_display_adapter_features(IoAdapter->ANum); |
739 | DBG_LOG(("A(%d) BRI adapter successfull started", IoAdapter->ANum)) | 739 | DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum)) |
740 | /* | 740 | /* |
741 | Register with DIDD | 741 | Register with DIDD |
742 | */ | 742 | */ |
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c index 903356547b79..5d65405c75f4 100644 --- a/drivers/isdn/hardware/eicon/os_pri.c +++ b/drivers/isdn/hardware/eicon/os_pri.c | |||
@@ -513,7 +513,7 @@ diva_pri_start_adapter(PISDN_ADAPTER IoAdapter, | |||
513 | 513 | ||
514 | diva_xdi_display_adapter_features(IoAdapter->ANum); | 514 | diva_xdi_display_adapter_features(IoAdapter->ANum); |
515 | 515 | ||
516 | DBG_LOG(("A(%d) PRI adapter successfull started", IoAdapter->ANum)) | 516 | DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum)) |
517 | /* | 517 | /* |
518 | Register with DIDD | 518 | Register with DIDD |
519 | */ | 519 | */ |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ab7c8e4a61f9..719943763391 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -215,7 +215,6 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, | |||
215 | /* choose a good rdev and read the page from there */ | 215 | /* choose a good rdev and read the page from there */ |
216 | 216 | ||
217 | mdk_rdev_t *rdev; | 217 | mdk_rdev_t *rdev; |
218 | struct list_head *tmp; | ||
219 | sector_t target; | 218 | sector_t target; |
220 | 219 | ||
221 | if (!page) | 220 | if (!page) |
@@ -223,7 +222,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, | |||
223 | if (!page) | 222 | if (!page) |
224 | return ERR_PTR(-ENOMEM); | 223 | return ERR_PTR(-ENOMEM); |
225 | 224 | ||
226 | rdev_for_each(rdev, tmp, mddev) { | 225 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
227 | if (! test_bit(In_sync, &rdev->flags) | 226 | if (! test_bit(In_sync, &rdev->flags) |
228 | || test_bit(Faulty, &rdev->flags)) | 227 | || test_bit(Faulty, &rdev->flags)) |
229 | continue; | 228 | continue; |
@@ -964,9 +963,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
964 | */ | 963 | */ |
965 | page = bitmap->sb_page; | 964 | page = bitmap->sb_page; |
966 | offset = sizeof(bitmap_super_t); | 965 | offset = sizeof(bitmap_super_t); |
967 | read_sb_page(bitmap->mddev, bitmap->offset, | 966 | if (!file) |
968 | page, | 967 | read_sb_page(bitmap->mddev, |
969 | index, count); | 968 | bitmap->offset, |
969 | page, | ||
970 | index, count); | ||
970 | } else if (file) { | 971 | } else if (file) { |
971 | page = read_page(file, index, bitmap, count); | 972 | page = read_page(file, index, bitmap, count); |
972 | offset = 0; | 973 | offset = 0; |
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index f26c1f9a475b..86d9adf90e79 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c | |||
@@ -283,7 +283,6 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size) | |||
283 | static int run(mddev_t *mddev) | 283 | static int run(mddev_t *mddev) |
284 | { | 284 | { |
285 | mdk_rdev_t *rdev; | 285 | mdk_rdev_t *rdev; |
286 | struct list_head *tmp; | ||
287 | int i; | 286 | int i; |
288 | 287 | ||
289 | conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); | 288 | conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); |
@@ -296,7 +295,7 @@ static int run(mddev_t *mddev) | |||
296 | } | 295 | } |
297 | conf->nfaults = 0; | 296 | conf->nfaults = 0; |
298 | 297 | ||
299 | rdev_for_each(rdev, tmp, mddev) | 298 | list_for_each_entry(rdev, &mddev->disks, same_set) |
300 | conf->rdev = rdev; | 299 | conf->rdev = rdev; |
301 | 300 | ||
302 | mddev->array_sectors = mddev->size * 2; | 301 | mddev->array_sectors = mddev->size * 2; |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 3b90c5c924ec..1e3aea9eecf1 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -105,7 +105,6 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
105 | int i, nb_zone, cnt; | 105 | int i, nb_zone, cnt; |
106 | sector_t min_sectors; | 106 | sector_t min_sectors; |
107 | sector_t curr_sector; | 107 | sector_t curr_sector; |
108 | struct list_head *tmp; | ||
109 | 108 | ||
110 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), | 109 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), |
111 | GFP_KERNEL); | 110 | GFP_KERNEL); |
@@ -115,7 +114,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
115 | cnt = 0; | 114 | cnt = 0; |
116 | conf->array_sectors = 0; | 115 | conf->array_sectors = 0; |
117 | 116 | ||
118 | rdev_for_each(rdev, tmp, mddev) { | 117 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
119 | int j = rdev->raid_disk; | 118 | int j = rdev->raid_disk; |
120 | dev_info_t *disk = conf->disks + j; | 119 | dev_info_t *disk = conf->disks + j; |
121 | 120 | ||
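The md changes in bitmap.c, faulty.c and linear.c above (and throughout md.c below) replace rdev_for_each(), which needed a spare struct list_head cursor, with list_for_each_entry() walking mddev->disks directly, so the temporary variable disappears. The following compilable sketch shows how such container_of-based intrusive-list iteration works; the list and macros are minimal local re-implementations rather than the kernel's <linux/list.h>, and the extra type argument stands in for the typeof() the kernel macro uses.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list in the spirit of <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, type, member)               \
	for (pos = container_of((head)->next, type, member);       \
	     &pos->member != (head);                                \
	     pos = container_of(pos->member.next, type, member))

struct rdev {
	int desc_nr;
	struct list_head same_set;   /* links this rdev into the array's disk list */
};

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct list_head disks = { &disks, &disks };
	struct rdev a = { 0, { NULL, NULL } }, b = { 1, { NULL, NULL } }, *r;

	list_add_tail(&a.same_set, &disks);
	list_add_tail(&b.same_set, &disks);

	/* No separate cursor variable: the entry pointer itself walks the list. */
	list_for_each_entry(r, &disks, struct rdev, same_set)
		printf("rdev %d\n", r->desc_nr);
	return 0;
}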
diff --git a/drivers/md/md.c b/drivers/md/md.c index 1b1d32694f6f..41e2509bf896 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -214,20 +214,33 @@ static inline mddev_t *mddev_get(mddev_t *mddev) | |||
214 | return mddev; | 214 | return mddev; |
215 | } | 215 | } |
216 | 216 | ||
217 | static void mddev_delayed_delete(struct work_struct *ws) | ||
218 | { | ||
219 | mddev_t *mddev = container_of(ws, mddev_t, del_work); | ||
220 | kobject_del(&mddev->kobj); | ||
221 | kobject_put(&mddev->kobj); | ||
222 | } | ||
223 | |||
217 | static void mddev_put(mddev_t *mddev) | 224 | static void mddev_put(mddev_t *mddev) |
218 | { | 225 | { |
219 | if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) | 226 | if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) |
220 | return; | 227 | return; |
221 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { | 228 | if (!mddev->raid_disks && list_empty(&mddev->disks) && |
229 | !mddev->hold_active) { | ||
222 | list_del(&mddev->all_mddevs); | 230 | list_del(&mddev->all_mddevs); |
223 | spin_unlock(&all_mddevs_lock); | 231 | if (mddev->gendisk) { |
224 | blk_cleanup_queue(mddev->queue); | 232 | /* we did a probe so need to clean up. |
225 | if (mddev->sysfs_state) | 233 | * Call schedule_work inside the spinlock |
226 | sysfs_put(mddev->sysfs_state); | 234 | * so that flush_scheduled_work() after |
227 | mddev->sysfs_state = NULL; | 235 | * mddev_find will succeed in waiting for the |
228 | kobject_put(&mddev->kobj); | 236 | * work to be done. |
229 | } else | 237 | */ |
230 | spin_unlock(&all_mddevs_lock); | 238 | INIT_WORK(&mddev->del_work, mddev_delayed_delete); |
239 | schedule_work(&mddev->del_work); | ||
240 | } else | ||
241 | kfree(mddev); | ||
242 | } | ||
243 | spin_unlock(&all_mddevs_lock); | ||
231 | } | 244 | } |
232 | 245 | ||
233 | static mddev_t * mddev_find(dev_t unit) | 246 | static mddev_t * mddev_find(dev_t unit) |
@@ -236,15 +249,50 @@ static mddev_t * mddev_find(dev_t unit) | |||
236 | 249 | ||
237 | retry: | 250 | retry: |
238 | spin_lock(&all_mddevs_lock); | 251 | spin_lock(&all_mddevs_lock); |
239 | list_for_each_entry(mddev, &all_mddevs, all_mddevs) | 252 | |
240 | if (mddev->unit == unit) { | 253 | if (unit) { |
241 | mddev_get(mddev); | 254 | list_for_each_entry(mddev, &all_mddevs, all_mddevs) |
255 | if (mddev->unit == unit) { | ||
256 | mddev_get(mddev); | ||
257 | spin_unlock(&all_mddevs_lock); | ||
258 | kfree(new); | ||
259 | return mddev; | ||
260 | } | ||
261 | |||
262 | if (new) { | ||
263 | list_add(&new->all_mddevs, &all_mddevs); | ||
242 | spin_unlock(&all_mddevs_lock); | 264 | spin_unlock(&all_mddevs_lock); |
243 | kfree(new); | 265 | new->hold_active = UNTIL_IOCTL; |
244 | return mddev; | 266 | return new; |
245 | } | 267 | } |
246 | 268 | } else if (new) { | |
247 | if (new) { | 269 | /* find an unused unit number */ |
270 | static int next_minor = 512; | ||
271 | int start = next_minor; | ||
272 | int is_free = 0; | ||
273 | int dev = 0; | ||
274 | while (!is_free) { | ||
275 | dev = MKDEV(MD_MAJOR, next_minor); | ||
276 | next_minor++; | ||
277 | if (next_minor > MINORMASK) | ||
278 | next_minor = 0; | ||
279 | if (next_minor == start) { | ||
280 | /* Oh dear, all in use. */ | ||
281 | spin_unlock(&all_mddevs_lock); | ||
282 | kfree(new); | ||
283 | return NULL; | ||
284 | } | ||
285 | |||
286 | is_free = 1; | ||
287 | list_for_each_entry(mddev, &all_mddevs, all_mddevs) | ||
288 | if (mddev->unit == dev) { | ||
289 | is_free = 0; | ||
290 | break; | ||
291 | } | ||
292 | } | ||
293 | new->unit = dev; | ||
294 | new->md_minor = MINOR(dev); | ||
295 | new->hold_active = UNTIL_STOP; | ||
248 | list_add(&new->all_mddevs, &all_mddevs); | 296 | list_add(&new->all_mddevs, &all_mddevs); |
249 | spin_unlock(&all_mddevs_lock); | 297 | spin_unlock(&all_mddevs_lock); |
250 | return new; | 298 | return new; |
@@ -275,16 +323,6 @@ static mddev_t * mddev_find(dev_t unit) | |||
275 | new->resync_max = MaxSector; | 323 | new->resync_max = MaxSector; |
276 | new->level = LEVEL_NONE; | 324 | new->level = LEVEL_NONE; |
277 | 325 | ||
278 | new->queue = blk_alloc_queue(GFP_KERNEL); | ||
279 | if (!new->queue) { | ||
280 | kfree(new); | ||
281 | return NULL; | ||
282 | } | ||
283 | /* Can be unlocked because the queue is new: no concurrency */ | ||
284 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue); | ||
285 | |||
286 | blk_queue_make_request(new->queue, md_fail_request); | ||
287 | |||
288 | goto retry; | 326 | goto retry; |
289 | } | 327 | } |
290 | 328 | ||
@@ -307,25 +345,23 @@ static inline void mddev_unlock(mddev_t * mddev) | |||
307 | 345 | ||
308 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 346 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) |
309 | { | 347 | { |
310 | mdk_rdev_t * rdev; | 348 | mdk_rdev_t *rdev; |
311 | struct list_head *tmp; | ||
312 | 349 | ||
313 | rdev_for_each(rdev, tmp, mddev) { | 350 | list_for_each_entry(rdev, &mddev->disks, same_set) |
314 | if (rdev->desc_nr == nr) | 351 | if (rdev->desc_nr == nr) |
315 | return rdev; | 352 | return rdev; |
316 | } | 353 | |
317 | return NULL; | 354 | return NULL; |
318 | } | 355 | } |
319 | 356 | ||
320 | static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) | 357 | static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) |
321 | { | 358 | { |
322 | struct list_head *tmp; | ||
323 | mdk_rdev_t *rdev; | 359 | mdk_rdev_t *rdev; |
324 | 360 | ||
325 | rdev_for_each(rdev, tmp, mddev) { | 361 | list_for_each_entry(rdev, &mddev->disks, same_set) |
326 | if (rdev->bdev->bd_dev == dev) | 362 | if (rdev->bdev->bd_dev == dev) |
327 | return rdev; | 363 | return rdev; |
328 | } | 364 | |
329 | return NULL; | 365 | return NULL; |
330 | } | 366 | } |
331 | 367 | ||
@@ -861,7 +897,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
861 | static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 897 | static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) |
862 | { | 898 | { |
863 | mdp_super_t *sb; | 899 | mdp_super_t *sb; |
864 | struct list_head *tmp; | ||
865 | mdk_rdev_t *rdev2; | 900 | mdk_rdev_t *rdev2; |
866 | int next_spare = mddev->raid_disks; | 901 | int next_spare = mddev->raid_disks; |
867 | 902 | ||
@@ -933,7 +968,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
933 | sb->state |= (1<<MD_SB_BITMAP_PRESENT); | 968 | sb->state |= (1<<MD_SB_BITMAP_PRESENT); |
934 | 969 | ||
935 | sb->disks[0].state = (1<<MD_DISK_REMOVED); | 970 | sb->disks[0].state = (1<<MD_DISK_REMOVED); |
936 | rdev_for_each(rdev2, tmp, mddev) { | 971 | list_for_each_entry(rdev2, &mddev->disks, same_set) { |
937 | mdp_disk_t *d; | 972 | mdp_disk_t *d; |
938 | int desc_nr; | 973 | int desc_nr; |
939 | if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) | 974 | if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) |
@@ -1259,7 +1294,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1259 | static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 1294 | static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) |
1260 | { | 1295 | { |
1261 | struct mdp_superblock_1 *sb; | 1296 | struct mdp_superblock_1 *sb; |
1262 | struct list_head *tmp; | ||
1263 | mdk_rdev_t *rdev2; | 1297 | mdk_rdev_t *rdev2; |
1264 | int max_dev, i; | 1298 | int max_dev, i; |
1265 | /* make rdev->sb match mddev and rdev data. */ | 1299 | /* make rdev->sb match mddev and rdev data. */ |
@@ -1307,7 +1341,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1307 | } | 1341 | } |
1308 | 1342 | ||
1309 | max_dev = 0; | 1343 | max_dev = 0; |
1310 | rdev_for_each(rdev2, tmp, mddev) | 1344 | list_for_each_entry(rdev2, &mddev->disks, same_set) |
1311 | if (rdev2->desc_nr+1 > max_dev) | 1345 | if (rdev2->desc_nr+1 > max_dev) |
1312 | max_dev = rdev2->desc_nr+1; | 1346 | max_dev = rdev2->desc_nr+1; |
1313 | 1347 | ||
@@ -1316,7 +1350,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1316 | for (i=0; i<max_dev;i++) | 1350 | for (i=0; i<max_dev;i++) |
1317 | sb->dev_roles[i] = cpu_to_le16(0xfffe); | 1351 | sb->dev_roles[i] = cpu_to_le16(0xfffe); |
1318 | 1352 | ||
1319 | rdev_for_each(rdev2, tmp, mddev) { | 1353 | list_for_each_entry(rdev2, &mddev->disks, same_set) { |
1320 | i = rdev2->desc_nr; | 1354 | i = rdev2->desc_nr; |
1321 | if (test_bit(Faulty, &rdev2->flags)) | 1355 | if (test_bit(Faulty, &rdev2->flags)) |
1322 | sb->dev_roles[i] = cpu_to_le16(0xfffe); | 1356 | sb->dev_roles[i] = cpu_to_le16(0xfffe); |
@@ -1466,6 +1500,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | |||
1466 | 1500 | ||
1467 | list_add_rcu(&rdev->same_set, &mddev->disks); | 1501 | list_add_rcu(&rdev->same_set, &mddev->disks); |
1468 | bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); | 1502 | bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); |
1503 | |||
1504 | /* May as well allow recovery to be retried once */ | ||
1505 | mddev->recovery_disabled = 0; | ||
1469 | return 0; | 1506 | return 0; |
1470 | 1507 | ||
1471 | fail: | 1508 | fail: |
@@ -1571,8 +1608,7 @@ static void kick_rdev_from_array(mdk_rdev_t * rdev) | |||
1571 | 1608 | ||
1572 | static void export_array(mddev_t *mddev) | 1609 | static void export_array(mddev_t *mddev) |
1573 | { | 1610 | { |
1574 | struct list_head *tmp; | 1611 | mdk_rdev_t *rdev, *tmp; |
1575 | mdk_rdev_t *rdev; | ||
1576 | 1612 | ||
1577 | rdev_for_each(rdev, tmp, mddev) { | 1613 | rdev_for_each(rdev, tmp, mddev) { |
1578 | if (!rdev->mddev) { | 1614 | if (!rdev->mddev) { |
@@ -1593,7 +1629,7 @@ static void print_desc(mdp_disk_t *desc) | |||
1593 | desc->major,desc->minor,desc->raid_disk,desc->state); | 1629 | desc->major,desc->minor,desc->raid_disk,desc->state); |
1594 | } | 1630 | } |
1595 | 1631 | ||
1596 | static void print_sb(mdp_super_t *sb) | 1632 | static void print_sb_90(mdp_super_t *sb) |
1597 | { | 1633 | { |
1598 | int i; | 1634 | int i; |
1599 | 1635 | ||
@@ -1624,10 +1660,57 @@ static void print_sb(mdp_super_t *sb) | |||
1624 | } | 1660 | } |
1625 | printk(KERN_INFO "md: THIS: "); | 1661 | printk(KERN_INFO "md: THIS: "); |
1626 | print_desc(&sb->this_disk); | 1662 | print_desc(&sb->this_disk); |
1627 | |||
1628 | } | 1663 | } |
1629 | 1664 | ||
1630 | static void print_rdev(mdk_rdev_t *rdev) | 1665 | static void print_sb_1(struct mdp_superblock_1 *sb) |
1666 | { | ||
1667 | __u8 *uuid; | ||
1668 | |||
1669 | uuid = sb->set_uuid; | ||
1670 | printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" | ||
1671 | ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" | ||
1672 | KERN_INFO "md: Name: \"%s\" CT:%llu\n", | ||
1673 | le32_to_cpu(sb->major_version), | ||
1674 | le32_to_cpu(sb->feature_map), | ||
1675 | uuid[0], uuid[1], uuid[2], uuid[3], | ||
1676 | uuid[4], uuid[5], uuid[6], uuid[7], | ||
1677 | uuid[8], uuid[9], uuid[10], uuid[11], | ||
1678 | uuid[12], uuid[13], uuid[14], uuid[15], | ||
1679 | sb->set_name, | ||
1680 | (unsigned long long)le64_to_cpu(sb->ctime) | ||
1681 | & MD_SUPERBLOCK_1_TIME_SEC_MASK); | ||
1682 | |||
1683 | uuid = sb->device_uuid; | ||
1684 | printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" | ||
1685 | " RO:%llu\n" | ||
1686 | KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" | ||
1687 | ":%02x%02x%02x%02x%02x%02x\n" | ||
1688 | KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" | ||
1689 | KERN_INFO "md: (MaxDev:%u) \n", | ||
1690 | le32_to_cpu(sb->level), | ||
1691 | (unsigned long long)le64_to_cpu(sb->size), | ||
1692 | le32_to_cpu(sb->raid_disks), | ||
1693 | le32_to_cpu(sb->layout), | ||
1694 | le32_to_cpu(sb->chunksize), | ||
1695 | (unsigned long long)le64_to_cpu(sb->data_offset), | ||
1696 | (unsigned long long)le64_to_cpu(sb->data_size), | ||
1697 | (unsigned long long)le64_to_cpu(sb->super_offset), | ||
1698 | (unsigned long long)le64_to_cpu(sb->recovery_offset), | ||
1699 | le32_to_cpu(sb->dev_number), | ||
1700 | uuid[0], uuid[1], uuid[2], uuid[3], | ||
1701 | uuid[4], uuid[5], uuid[6], uuid[7], | ||
1702 | uuid[8], uuid[9], uuid[10], uuid[11], | ||
1703 | uuid[12], uuid[13], uuid[14], uuid[15], | ||
1704 | sb->devflags, | ||
1705 | (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, | ||
1706 | (unsigned long long)le64_to_cpu(sb->events), | ||
1707 | (unsigned long long)le64_to_cpu(sb->resync_offset), | ||
1708 | le32_to_cpu(sb->sb_csum), | ||
1709 | le32_to_cpu(sb->max_dev) | ||
1710 | ); | ||
1711 | } | ||
1712 | |||
1713 | static void print_rdev(mdk_rdev_t *rdev, int major_version) | ||
1631 | { | 1714 | { |
1632 | char b[BDEVNAME_SIZE]; | 1715 | char b[BDEVNAME_SIZE]; |
1633 | printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", | 1716 | printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", |
@@ -1635,15 +1718,22 @@ static void print_rdev(mdk_rdev_t *rdev) | |||
1635 | test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), | 1718 | test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), |
1636 | rdev->desc_nr); | 1719 | rdev->desc_nr); |
1637 | if (rdev->sb_loaded) { | 1720 | if (rdev->sb_loaded) { |
1638 | printk(KERN_INFO "md: rdev superblock:\n"); | 1721 | printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); |
1639 | print_sb((mdp_super_t*)page_address(rdev->sb_page)); | 1722 | switch (major_version) { |
1723 | case 0: | ||
1724 | print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); | ||
1725 | break; | ||
1726 | case 1: | ||
1727 | print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); | ||
1728 | break; | ||
1729 | } | ||
1640 | } else | 1730 | } else |
1641 | printk(KERN_INFO "md: no rdev superblock!\n"); | 1731 | printk(KERN_INFO "md: no rdev superblock!\n"); |
1642 | } | 1732 | } |
1643 | 1733 | ||
1644 | static void md_print_devices(void) | 1734 | static void md_print_devices(void) |
1645 | { | 1735 | { |
1646 | struct list_head *tmp, *tmp2; | 1736 | struct list_head *tmp; |
1647 | mdk_rdev_t *rdev; | 1737 | mdk_rdev_t *rdev; |
1648 | mddev_t *mddev; | 1738 | mddev_t *mddev; |
1649 | char b[BDEVNAME_SIZE]; | 1739 | char b[BDEVNAME_SIZE]; |
@@ -1658,12 +1748,12 @@ static void md_print_devices(void) | |||
1658 | bitmap_print_sb(mddev->bitmap); | 1748 | bitmap_print_sb(mddev->bitmap); |
1659 | else | 1749 | else |
1660 | printk("%s: ", mdname(mddev)); | 1750 | printk("%s: ", mdname(mddev)); |
1661 | rdev_for_each(rdev, tmp2, mddev) | 1751 | list_for_each_entry(rdev, &mddev->disks, same_set) |
1662 | printk("<%s>", bdevname(rdev->bdev,b)); | 1752 | printk("<%s>", bdevname(rdev->bdev,b)); |
1663 | printk("\n"); | 1753 | printk("\n"); |
1664 | 1754 | ||
1665 | rdev_for_each(rdev, tmp2, mddev) | 1755 | list_for_each_entry(rdev, &mddev->disks, same_set) |
1666 | print_rdev(rdev); | 1756 | print_rdev(rdev, mddev->major_version); |
1667 | } | 1757 | } |
1668 | printk("md: **********************************\n"); | 1758 | printk("md: **********************************\n"); |
1669 | printk("\n"); | 1759 | printk("\n"); |
@@ -1679,9 +1769,8 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
1679 | * with the rest of the array) | 1769 | * with the rest of the array) |
1680 | */ | 1770 | */ |
1681 | mdk_rdev_t *rdev; | 1771 | mdk_rdev_t *rdev; |
1682 | struct list_head *tmp; | ||
1683 | 1772 | ||
1684 | rdev_for_each(rdev, tmp, mddev) { | 1773 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
1685 | if (rdev->sb_events == mddev->events || | 1774 | if (rdev->sb_events == mddev->events || |
1686 | (nospares && | 1775 | (nospares && |
1687 | rdev->raid_disk < 0 && | 1776 | rdev->raid_disk < 0 && |
@@ -1699,7 +1788,6 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
1699 | 1788 | ||
1700 | static void md_update_sb(mddev_t * mddev, int force_change) | 1789 | static void md_update_sb(mddev_t * mddev, int force_change) |
1701 | { | 1790 | { |
1702 | struct list_head *tmp; | ||
1703 | mdk_rdev_t *rdev; | 1791 | mdk_rdev_t *rdev; |
1704 | int sync_req; | 1792 | int sync_req; |
1705 | int nospares = 0; | 1793 | int nospares = 0; |
@@ -1790,7 +1878,7 @@ repeat: | |||
1790 | mdname(mddev),mddev->in_sync); | 1878 | mdname(mddev),mddev->in_sync); |
1791 | 1879 | ||
1792 | bitmap_update_sb(mddev->bitmap); | 1880 | bitmap_update_sb(mddev->bitmap); |
1793 | rdev_for_each(rdev, tmp, mddev) { | 1881 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
1794 | char b[BDEVNAME_SIZE]; | 1882 | char b[BDEVNAME_SIZE]; |
1795 | dprintk(KERN_INFO "md: "); | 1883 | dprintk(KERN_INFO "md: "); |
1796 | if (rdev->sb_loaded != 1) | 1884 | if (rdev->sb_loaded != 1) |
@@ -1999,7 +2087,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
1999 | md_wakeup_thread(rdev->mddev->thread); | 2087 | md_wakeup_thread(rdev->mddev->thread); |
2000 | } else if (rdev->mddev->pers) { | 2088 | } else if (rdev->mddev->pers) { |
2001 | mdk_rdev_t *rdev2; | 2089 | mdk_rdev_t *rdev2; |
2002 | struct list_head *tmp; | ||
2003 | /* Activating a spare .. or possibly reactivating | 2090 | /* Activating a spare .. or possibly reactivating |
2004 | * if we every get bitmaps working here. | 2091 | * if we every get bitmaps working here. |
2005 | */ | 2092 | */ |
@@ -2010,7 +2097,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2010 | if (rdev->mddev->pers->hot_add_disk == NULL) | 2097 | if (rdev->mddev->pers->hot_add_disk == NULL) |
2011 | return -EINVAL; | 2098 | return -EINVAL; |
2012 | 2099 | ||
2013 | rdev_for_each(rdev2, tmp, rdev->mddev) | 2100 | list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) |
2014 | if (rdev2->raid_disk == slot) | 2101 | if (rdev2->raid_disk == slot) |
2015 | return -EEXIST; | 2102 | return -EEXIST; |
2016 | 2103 | ||
@@ -2125,14 +2212,14 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2125 | */ | 2212 | */ |
2126 | mddev_t *mddev; | 2213 | mddev_t *mddev; |
2127 | int overlap = 0; | 2214 | int overlap = 0; |
2128 | struct list_head *tmp, *tmp2; | 2215 | struct list_head *tmp; |
2129 | 2216 | ||
2130 | mddev_unlock(my_mddev); | 2217 | mddev_unlock(my_mddev); |
2131 | for_each_mddev(mddev, tmp) { | 2218 | for_each_mddev(mddev, tmp) { |
2132 | mdk_rdev_t *rdev2; | 2219 | mdk_rdev_t *rdev2; |
2133 | 2220 | ||
2134 | mddev_lock(mddev); | 2221 | mddev_lock(mddev); |
2135 | rdev_for_each(rdev2, tmp2, mddev) | 2222 | list_for_each_entry(rdev2, &mddev->disks, same_set) |
2136 | if (test_bit(AllReserved, &rdev2->flags) || | 2223 | if (test_bit(AllReserved, &rdev2->flags) || |
2137 | (rdev->bdev == rdev2->bdev && | 2224 | (rdev->bdev == rdev2->bdev && |
2138 | rdev != rdev2 && | 2225 | rdev != rdev2 && |
@@ -2328,8 +2415,7 @@ abort_free: | |||
2328 | static void analyze_sbs(mddev_t * mddev) | 2415 | static void analyze_sbs(mddev_t * mddev) |
2329 | { | 2416 | { |
2330 | int i; | 2417 | int i; |
2331 | struct list_head *tmp; | 2418 | mdk_rdev_t *rdev, *freshest, *tmp; |
2332 | mdk_rdev_t *rdev, *freshest; | ||
2333 | char b[BDEVNAME_SIZE]; | 2419 | char b[BDEVNAME_SIZE]; |
2334 | 2420 | ||
2335 | freshest = NULL; | 2421 | freshest = NULL; |
@@ -3046,7 +3132,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) | |||
3046 | } | 3132 | } |
3047 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 3133 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
3048 | md_wakeup_thread(mddev->thread); | 3134 | md_wakeup_thread(mddev->thread); |
3049 | sysfs_notify(&mddev->kobj, NULL, "sync_action"); | 3135 | sysfs_notify_dirent(mddev->sysfs_action); |
3050 | return len; | 3136 | return len; |
3051 | } | 3137 | } |
3052 | 3138 | ||
@@ -3404,6 +3490,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3404 | if (!capable(CAP_SYS_ADMIN)) | 3490 | if (!capable(CAP_SYS_ADMIN)) |
3405 | return -EACCES; | 3491 | return -EACCES; |
3406 | rv = mddev_lock(mddev); | 3492 | rv = mddev_lock(mddev); |
3493 | if (mddev->hold_active == UNTIL_IOCTL) | ||
3494 | mddev->hold_active = 0; | ||
3407 | if (!rv) { | 3495 | if (!rv) { |
3408 | rv = entry->store(mddev, page, length); | 3496 | rv = entry->store(mddev, page, length); |
3409 | mddev_unlock(mddev); | 3497 | mddev_unlock(mddev); |
@@ -3414,6 +3502,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3414 | static void md_free(struct kobject *ko) | 3502 | static void md_free(struct kobject *ko) |
3415 | { | 3503 | { |
3416 | mddev_t *mddev = container_of(ko, mddev_t, kobj); | 3504 | mddev_t *mddev = container_of(ko, mddev_t, kobj); |
3505 | |||
3506 | if (mddev->sysfs_state) | ||
3507 | sysfs_put(mddev->sysfs_state); | ||
3508 | |||
3509 | if (mddev->gendisk) { | ||
3510 | del_gendisk(mddev->gendisk); | ||
3511 | put_disk(mddev->gendisk); | ||
3512 | } | ||
3513 | if (mddev->queue) | ||
3514 | blk_cleanup_queue(mddev->queue); | ||
3515 | |||
3417 | kfree(mddev); | 3516 | kfree(mddev); |
3418 | } | 3517 | } |
3419 | 3518 | ||
@@ -3429,34 +3528,74 @@ static struct kobj_type md_ktype = { | |||
3429 | 3528 | ||
3430 | int mdp_major = 0; | 3529 | int mdp_major = 0; |
3431 | 3530 | ||
3432 | static struct kobject *md_probe(dev_t dev, int *part, void *data) | 3531 | static int md_alloc(dev_t dev, char *name) |
3433 | { | 3532 | { |
3434 | static DEFINE_MUTEX(disks_mutex); | 3533 | static DEFINE_MUTEX(disks_mutex); |
3435 | mddev_t *mddev = mddev_find(dev); | 3534 | mddev_t *mddev = mddev_find(dev); |
3436 | struct gendisk *disk; | 3535 | struct gendisk *disk; |
3437 | int partitioned = (MAJOR(dev) != MD_MAJOR); | 3536 | int partitioned; |
3438 | int shift = partitioned ? MdpMinorShift : 0; | 3537 | int shift; |
3439 | int unit = MINOR(dev) >> shift; | 3538 | int unit; |
3440 | int error; | 3539 | int error; |
3441 | 3540 | ||
3442 | if (!mddev) | 3541 | if (!mddev) |
3443 | return NULL; | 3542 | return -ENODEV; |
3543 | |||
3544 | partitioned = (MAJOR(mddev->unit) != MD_MAJOR); | ||
3545 | shift = partitioned ? MdpMinorShift : 0; | ||
3546 | unit = MINOR(mddev->unit) >> shift; | ||
3547 | |||
3548 | /* wait for any previous instance of this device | ||
3549 | * to be completely removed (mddev_delayed_delete). | ||
3550 | */ | ||
3551 | flush_scheduled_work(); | ||
3444 | 3552 | ||
3445 | mutex_lock(&disks_mutex); | 3553 | mutex_lock(&disks_mutex); |
3446 | if (mddev->gendisk) { | 3554 | if (mddev->gendisk) { |
3447 | mutex_unlock(&disks_mutex); | 3555 | mutex_unlock(&disks_mutex); |
3448 | mddev_put(mddev); | 3556 | mddev_put(mddev); |
3449 | return NULL; | 3557 | return -EEXIST; |
3558 | } | ||
3559 | |||
3560 | if (name) { | ||
3561 | /* Need to ensure that 'name' is not a duplicate. | ||
3562 | */ | ||
3563 | mddev_t *mddev2; | ||
3564 | spin_lock(&all_mddevs_lock); | ||
3565 | |||
3566 | list_for_each_entry(mddev2, &all_mddevs, all_mddevs) | ||
3567 | if (mddev2->gendisk && | ||
3568 | strcmp(mddev2->gendisk->disk_name, name) == 0) { | ||
3569 | spin_unlock(&all_mddevs_lock); | ||
3570 | return -EEXIST; | ||
3571 | } | ||
3572 | spin_unlock(&all_mddevs_lock); | ||
3573 | } | ||
3574 | |||
3575 | mddev->queue = blk_alloc_queue(GFP_KERNEL); | ||
3576 | if (!mddev->queue) { | ||
3577 | mutex_unlock(&disks_mutex); | ||
3578 | mddev_put(mddev); | ||
3579 | return -ENOMEM; | ||
3450 | } | 3580 | } |
3581 | /* Can be unlocked because the queue is new: no concurrency */ | ||
3582 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); | ||
3583 | |||
3584 | blk_queue_make_request(mddev->queue, md_fail_request); | ||
3585 | |||
3451 | disk = alloc_disk(1 << shift); | 3586 | disk = alloc_disk(1 << shift); |
3452 | if (!disk) { | 3587 | if (!disk) { |
3453 | mutex_unlock(&disks_mutex); | 3588 | mutex_unlock(&disks_mutex); |
3589 | blk_cleanup_queue(mddev->queue); | ||
3590 | mddev->queue = NULL; | ||
3454 | mddev_put(mddev); | 3591 | mddev_put(mddev); |
3455 | return NULL; | 3592 | return -ENOMEM; |
3456 | } | 3593 | } |
3457 | disk->major = MAJOR(dev); | 3594 | disk->major = MAJOR(mddev->unit); |
3458 | disk->first_minor = unit << shift; | 3595 | disk->first_minor = unit << shift; |
3459 | if (partitioned) | 3596 | if (name) |
3597 | strcpy(disk->disk_name, name); | ||
3598 | else if (partitioned) | ||
3460 | sprintf(disk->disk_name, "md_d%d", unit); | 3599 | sprintf(disk->disk_name, "md_d%d", unit); |
3461 | else | 3600 | else |
3462 | sprintf(disk->disk_name, "md%d", unit); | 3601 | sprintf(disk->disk_name, "md%d", unit); |
@@ -3464,7 +3603,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) | |||
3464 | disk->private_data = mddev; | 3603 | disk->private_data = mddev; |
3465 | disk->queue = mddev->queue; | 3604 | disk->queue = mddev->queue; |
3466 | /* Allow extended partitions. This makes the | 3605 | /* Allow extended partitions. This makes the |
3467 | * 'mdp' device redundant, but we can really | 3606 | * 'mdp' device redundant, but we can't really |
3468 | * remove it now. | 3607 | * remove it now. |
3469 | */ | 3608 | */ |
3470 | disk->flags |= GENHD_FL_EXT_DEVT; | 3609 | disk->flags |= GENHD_FL_EXT_DEVT; |
@@ -3480,9 +3619,35 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) | |||
3480 | kobject_uevent(&mddev->kobj, KOBJ_ADD); | 3619 | kobject_uevent(&mddev->kobj, KOBJ_ADD); |
3481 | mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); | 3620 | mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); |
3482 | } | 3621 | } |
3622 | mddev_put(mddev); | ||
3623 | return 0; | ||
3624 | } | ||
3625 | |||
3626 | static struct kobject *md_probe(dev_t dev, int *part, void *data) | ||
3627 | { | ||
3628 | md_alloc(dev, NULL); | ||
3483 | return NULL; | 3629 | return NULL; |
3484 | } | 3630 | } |
3485 | 3631 | ||
3632 | static int add_named_array(const char *val, struct kernel_param *kp) | ||
3633 | { | ||
3634 | /* val must be "md_*" where * is not all digits. | ||
3635 | * We allocate an array with a large free minor number, and | ||
3636 | * set the name to val. val must not already be an active name. | ||
3637 | */ | ||
3638 | int len = strlen(val); | ||
3639 | char buf[DISK_NAME_LEN]; | ||
3640 | |||
3641 | while (len && val[len-1] == '\n') | ||
3642 | len--; | ||
3643 | if (len >= DISK_NAME_LEN) | ||
3644 | return -E2BIG; | ||
3645 | strlcpy(buf, val, len+1); | ||
3646 | if (strncmp(buf, "md_", 3) != 0) | ||
3647 | return -EINVAL; | ||
3648 | return md_alloc(0, buf); | ||
3649 | } | ||
3650 | |||
3486 | static void md_safemode_timeout(unsigned long data) | 3651 | static void md_safemode_timeout(unsigned long data) |
3487 | { | 3652 | { |
3488 | mddev_t *mddev = (mddev_t *) data; | 3653 | mddev_t *mddev = (mddev_t *) data; |
@@ -3501,7 +3666,6 @@ static int do_md_run(mddev_t * mddev) | |||
3501 | { | 3666 | { |
3502 | int err; | 3667 | int err; |
3503 | int chunk_size; | 3668 | int chunk_size; |
3504 | struct list_head *tmp; | ||
3505 | mdk_rdev_t *rdev; | 3669 | mdk_rdev_t *rdev; |
3506 | struct gendisk *disk; | 3670 | struct gendisk *disk; |
3507 | struct mdk_personality *pers; | 3671 | struct mdk_personality *pers; |
@@ -3540,7 +3704,7 @@ static int do_md_run(mddev_t * mddev) | |||
3540 | } | 3704 | } |
3541 | 3705 | ||
3542 | /* devices must have minimum size of one chunk */ | 3706 | /* devices must have minimum size of one chunk */ |
3543 | rdev_for_each(rdev, tmp, mddev) { | 3707 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
3544 | if (test_bit(Faulty, &rdev->flags)) | 3708 | if (test_bit(Faulty, &rdev->flags)) |
3545 | continue; | 3709 | continue; |
3546 | if (rdev->size < chunk_size / 1024) { | 3710 | if (rdev->size < chunk_size / 1024) { |
@@ -3565,7 +3729,7 @@ static int do_md_run(mddev_t * mddev) | |||
3565 | * the only valid external interface is through the md | 3729 | * the only valid external interface is through the md |
3566 | * device. | 3730 | * device. |
3567 | */ | 3731 | */ |
3568 | rdev_for_each(rdev, tmp, mddev) { | 3732 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
3569 | if (test_bit(Faulty, &rdev->flags)) | 3733 | if (test_bit(Faulty, &rdev->flags)) |
3570 | continue; | 3734 | continue; |
3571 | sync_blockdev(rdev->bdev); | 3735 | sync_blockdev(rdev->bdev); |
@@ -3630,10 +3794,10 @@ static int do_md_run(mddev_t * mddev) | |||
3630 | */ | 3794 | */ |
3631 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 3795 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
3632 | mdk_rdev_t *rdev2; | 3796 | mdk_rdev_t *rdev2; |
3633 | struct list_head *tmp2; | ||
3634 | int warned = 0; | 3797 | int warned = 0; |
3635 | rdev_for_each(rdev, tmp, mddev) { | 3798 | |
3636 | rdev_for_each(rdev2, tmp2, mddev) { | 3799 | list_for_each_entry(rdev, &mddev->disks, same_set) |
3800 | list_for_each_entry(rdev2, &mddev->disks, same_set) { | ||
3637 | if (rdev < rdev2 && | 3801 | if (rdev < rdev2 && |
3638 | rdev->bdev->bd_contains == | 3802 | rdev->bdev->bd_contains == |
3639 | rdev2->bdev->bd_contains) { | 3803 | rdev2->bdev->bd_contains) { |
@@ -3647,7 +3811,7 @@ static int do_md_run(mddev_t * mddev) | |||
3647 | warned = 1; | 3811 | warned = 1; |
3648 | } | 3812 | } |
3649 | } | 3813 | } |
3650 | } | 3814 | |
3651 | if (warned) | 3815 | if (warned) |
3652 | printk(KERN_WARNING | 3816 | printk(KERN_WARNING |
3653 | "True protection against single-disk" | 3817 | "True protection against single-disk" |
@@ -3684,6 +3848,7 @@ static int do_md_run(mddev_t * mddev) | |||
3684 | printk(KERN_WARNING | 3848 | printk(KERN_WARNING |
3685 | "md: cannot register extra attributes for %s\n", | 3849 | "md: cannot register extra attributes for %s\n", |
3686 | mdname(mddev)); | 3850 | mdname(mddev)); |
3851 | mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); | ||
3687 | } else if (mddev->ro == 2) /* auto-readonly not meaningful */ | 3852 | } else if (mddev->ro == 2) /* auto-readonly not meaningful */ |
3688 | mddev->ro = 0; | 3853 | mddev->ro = 0; |
3689 | 3854 | ||
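The sync_action handling here trades name-based sysfs_notify() calls for a cached sysfs_dirent. A hedged sketch of the lifecycle as it appears across these hunks (do_md_run, the notify sites, and do_md_stop):

	/* look the attribute up once, after md_redundancy_group is added */
	mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");

	/* later: signal a change without re-resolving the name; the NULL
	 * check matters because personalities without ->sync_request never
	 * create the attribute */
	if (mddev->sysfs_action)
		sysfs_notify_dirent(mddev->sysfs_action);

	/* teardown: drop the reference taken by sysfs_get_dirent() */
	sysfs_put(mddev->sysfs_action);
	mddev->sysfs_action = NULL;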
@@ -3694,7 +3859,7 @@ static int do_md_run(mddev_t * mddev) | |||
3694 | mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ | 3859 | mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ |
3695 | mddev->in_sync = 1; | 3860 | mddev->in_sync = 1; |
3696 | 3861 | ||
3697 | rdev_for_each(rdev, tmp, mddev) | 3862 | list_for_each_entry(rdev, &mddev->disks, same_set) |
3698 | if (rdev->raid_disk >= 0) { | 3863 | if (rdev->raid_disk >= 0) { |
3699 | char nm[20]; | 3864 | char nm[20]; |
3700 | sprintf(nm, "rd%d", rdev->raid_disk); | 3865 | sprintf(nm, "rd%d", rdev->raid_disk); |
@@ -3725,9 +3890,8 @@ static int do_md_run(mddev_t * mddev) | |||
3725 | * it will remove the drives and not do the right thing | 3890 | * it will remove the drives and not do the right thing |
3726 | */ | 3891 | */ |
3727 | if (mddev->degraded && !mddev->sync_thread) { | 3892 | if (mddev->degraded && !mddev->sync_thread) { |
3728 | struct list_head *rtmp; | ||
3729 | int spares = 0; | 3893 | int spares = 0; |
3730 | rdev_for_each(rdev, rtmp, mddev) | 3894 | list_for_each_entry(rdev, &mddev->disks, same_set) |
3731 | if (rdev->raid_disk >= 0 && | 3895 | if (rdev->raid_disk >= 0 && |
3732 | !test_bit(In_sync, &rdev->flags) && | 3896 | !test_bit(In_sync, &rdev->flags) && |
3733 | !test_bit(Faulty, &rdev->flags)) | 3897 | !test_bit(Faulty, &rdev->flags)) |
@@ -3754,7 +3918,8 @@ static int do_md_run(mddev_t * mddev) | |||
3754 | mddev->changed = 1; | 3918 | mddev->changed = 1; |
3755 | md_new_event(mddev); | 3919 | md_new_event(mddev); |
3756 | sysfs_notify_dirent(mddev->sysfs_state); | 3920 | sysfs_notify_dirent(mddev->sysfs_state); |
3757 | sysfs_notify(&mddev->kobj, NULL, "sync_action"); | 3921 | if (mddev->sysfs_action) |
3922 | sysfs_notify_dirent(mddev->sysfs_action); | ||
3758 | sysfs_notify(&mddev->kobj, NULL, "degraded"); | 3923 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
3759 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); | 3924 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); |
3760 | return 0; | 3925 | return 0; |
@@ -3854,9 +4019,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
3854 | mddev->queue->merge_bvec_fn = NULL; | 4019 | mddev->queue->merge_bvec_fn = NULL; |
3855 | mddev->queue->unplug_fn = NULL; | 4020 | mddev->queue->unplug_fn = NULL; |
3856 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4021 | mddev->queue->backing_dev_info.congested_fn = NULL; |
3857 | if (mddev->pers->sync_request) | 4022 | if (mddev->pers->sync_request) { |
3858 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); | 4023 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); |
3859 | 4024 | if (mddev->sysfs_action) | |
4025 | sysfs_put(mddev->sysfs_action); | ||
4026 | mddev->sysfs_action = NULL; | ||
4027 | } | ||
3860 | module_put(mddev->pers->owner); | 4028 | module_put(mddev->pers->owner); |
3861 | mddev->pers = NULL; | 4029 | mddev->pers = NULL; |
3862 | /* tell userspace to handle 'inactive' */ | 4030 | /* tell userspace to handle 'inactive' */ |
@@ -3883,7 +4051,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
3883 | */ | 4051 | */ |
3884 | if (mode == 0) { | 4052 | if (mode == 0) { |
3885 | mdk_rdev_t *rdev; | 4053 | mdk_rdev_t *rdev; |
3886 | struct list_head *tmp; | ||
3887 | 4054 | ||
3888 | printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); | 4055 | printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); |
3889 | 4056 | ||
@@ -3895,7 +4062,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
3895 | } | 4062 | } |
3896 | mddev->bitmap_offset = 0; | 4063 | mddev->bitmap_offset = 0; |
3897 | 4064 | ||
3898 | rdev_for_each(rdev, tmp, mddev) | 4065 | list_for_each_entry(rdev, &mddev->disks, same_set) |
3899 | if (rdev->raid_disk >= 0) { | 4066 | if (rdev->raid_disk >= 0) { |
3900 | char nm[20]; | 4067 | char nm[20]; |
3901 | sprintf(nm, "rd%d", rdev->raid_disk); | 4068 | sprintf(nm, "rd%d", rdev->raid_disk); |
@@ -3941,6 +4108,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
3941 | mddev->barriers_work = 0; | 4108 | mddev->barriers_work = 0; |
3942 | mddev->safemode = 0; | 4109 | mddev->safemode = 0; |
3943 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); | 4110 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); |
4111 | if (mddev->hold_active == UNTIL_STOP) | ||
4112 | mddev->hold_active = 0; | ||
3944 | 4113 | ||
3945 | } else if (mddev->pers) | 4114 | } else if (mddev->pers) |
3946 | printk(KERN_INFO "md: %s switched to read-only mode.\n", | 4115 | printk(KERN_INFO "md: %s switched to read-only mode.\n", |
@@ -3956,7 +4125,6 @@ out: | |||
3956 | static void autorun_array(mddev_t *mddev) | 4125 | static void autorun_array(mddev_t *mddev) |
3957 | { | 4126 | { |
3958 | mdk_rdev_t *rdev; | 4127 | mdk_rdev_t *rdev; |
3959 | struct list_head *tmp; | ||
3960 | int err; | 4128 | int err; |
3961 | 4129 | ||
3962 | if (list_empty(&mddev->disks)) | 4130 | if (list_empty(&mddev->disks)) |
@@ -3964,7 +4132,7 @@ static void autorun_array(mddev_t *mddev) | |||
3964 | 4132 | ||
3965 | printk(KERN_INFO "md: running: "); | 4133 | printk(KERN_INFO "md: running: "); |
3966 | 4134 | ||
3967 | rdev_for_each(rdev, tmp, mddev) { | 4135 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
3968 | char b[BDEVNAME_SIZE]; | 4136 | char b[BDEVNAME_SIZE]; |
3969 | printk("<%s>", bdevname(rdev->bdev,b)); | 4137 | printk("<%s>", bdevname(rdev->bdev,b)); |
3970 | } | 4138 | } |
@@ -3991,8 +4159,7 @@ static void autorun_array(mddev_t *mddev) | |||
3991 | */ | 4159 | */ |
3992 | static void autorun_devices(int part) | 4160 | static void autorun_devices(int part) |
3993 | { | 4161 | { |
3994 | struct list_head *tmp; | 4162 | mdk_rdev_t *rdev0, *rdev, *tmp; |
3995 | mdk_rdev_t *rdev0, *rdev; | ||
3996 | mddev_t *mddev; | 4163 | mddev_t *mddev; |
3997 | char b[BDEVNAME_SIZE]; | 4164 | char b[BDEVNAME_SIZE]; |
3998 | 4165 | ||
@@ -4007,7 +4174,7 @@ static void autorun_devices(int part) | |||
4007 | printk(KERN_INFO "md: considering %s ...\n", | 4174 | printk(KERN_INFO "md: considering %s ...\n", |
4008 | bdevname(rdev0->bdev,b)); | 4175 | bdevname(rdev0->bdev,b)); |
4009 | INIT_LIST_HEAD(&candidates); | 4176 | INIT_LIST_HEAD(&candidates); |
4010 | rdev_for_each_list(rdev, tmp, pending_raid_disks) | 4177 | rdev_for_each_list(rdev, tmp, &pending_raid_disks) |
4011 | if (super_90_load(rdev, rdev0, 0) >= 0) { | 4178 | if (super_90_load(rdev, rdev0, 0) >= 0) { |
4012 | printk(KERN_INFO "md: adding %s ...\n", | 4179 | printk(KERN_INFO "md: adding %s ...\n", |
4013 | bdevname(rdev->bdev,b)); | 4180 | bdevname(rdev->bdev,b)); |
@@ -4053,7 +4220,7 @@ static void autorun_devices(int part) | |||
4053 | } else { | 4220 | } else { |
4054 | printk(KERN_INFO "md: created %s\n", mdname(mddev)); | 4221 | printk(KERN_INFO "md: created %s\n", mdname(mddev)); |
4055 | mddev->persistent = 1; | 4222 | mddev->persistent = 1; |
4056 | rdev_for_each_list(rdev, tmp, candidates) { | 4223 | rdev_for_each_list(rdev, tmp, &candidates) { |
4057 | list_del_init(&rdev->same_set); | 4224 | list_del_init(&rdev->same_set); |
4058 | if (bind_rdev_to_array(rdev, mddev)) | 4225 | if (bind_rdev_to_array(rdev, mddev)) |
4059 | export_rdev(rdev); | 4226 | export_rdev(rdev); |
@@ -4064,7 +4231,7 @@ static void autorun_devices(int part) | |||
4064 | /* on success, candidates will be empty, on error | 4231 | /* on success, candidates will be empty, on error |
4065 | * it won't... | 4232 | * it won't... |
4066 | */ | 4233 | */ |
4067 | rdev_for_each_list(rdev, tmp, candidates) { | 4234 | rdev_for_each_list(rdev, tmp, &candidates) { |
4068 | list_del_init(&rdev->same_set); | 4235 | list_del_init(&rdev->same_set); |
4069 | export_rdev(rdev); | 4236 | export_rdev(rdev); |
4070 | } | 4237 | } |
@@ -4093,10 +4260,9 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
4093 | mdu_array_info_t info; | 4260 | mdu_array_info_t info; |
4094 | int nr,working,active,failed,spare; | 4261 | int nr,working,active,failed,spare; |
4095 | mdk_rdev_t *rdev; | 4262 | mdk_rdev_t *rdev; |
4096 | struct list_head *tmp; | ||
4097 | 4263 | ||
4098 | nr=working=active=failed=spare=0; | 4264 | nr=working=active=failed=spare=0; |
4099 | rdev_for_each(rdev, tmp, mddev) { | 4265 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
4100 | nr++; | 4266 | nr++; |
4101 | if (test_bit(Faulty, &rdev->flags)) | 4267 | if (test_bit(Faulty, &rdev->flags)) |
4102 | failed++; | 4268 | failed++; |
@@ -4614,9 +4780,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) | |||
4614 | 4780 | ||
4615 | static int update_size(mddev_t *mddev, sector_t num_sectors) | 4781 | static int update_size(mddev_t *mddev, sector_t num_sectors) |
4616 | { | 4782 | { |
4617 | mdk_rdev_t * rdev; | 4783 | mdk_rdev_t *rdev; |
4618 | int rv; | 4784 | int rv; |
4619 | struct list_head *tmp; | ||
4620 | int fit = (num_sectors == 0); | 4785 | int fit = (num_sectors == 0); |
4621 | 4786 | ||
4622 | if (mddev->pers->resize == NULL) | 4787 | if (mddev->pers->resize == NULL) |
@@ -4638,7 +4803,7 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) | |||
4638 | * grow, and re-add. | 4803 | * grow, and re-add. |
4639 | */ | 4804 | */ |
4640 | return -EBUSY; | 4805 | return -EBUSY; |
4641 | rdev_for_each(rdev, tmp, mddev) { | 4806 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
4642 | sector_t avail; | 4807 | sector_t avail; |
4643 | avail = rdev->size * 2; | 4808 | avail = rdev->size * 2; |
4644 | 4809 | ||
@@ -5000,6 +5165,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
5000 | 5165 | ||
5001 | done_unlock: | 5166 | done_unlock: |
5002 | abort_unlock: | 5167 | abort_unlock: |
5168 | if (mddev->hold_active == UNTIL_IOCTL && | ||
5169 | err != -EINVAL) | ||
5170 | mddev->hold_active = 0; | ||
5003 | mddev_unlock(mddev); | 5171 | mddev_unlock(mddev); |
5004 | 5172 | ||
5005 | return err; | 5173 | return err; |
@@ -5016,14 +5184,25 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
5016 | * Succeed if we can lock the mddev, which confirms that | 5184 | * Succeed if we can lock the mddev, which confirms that |
5017 | * it isn't being stopped right now. | 5185 | * it isn't being stopped right now. |
5018 | */ | 5186 | */ |
5019 | mddev_t *mddev = bdev->bd_disk->private_data; | 5187 | mddev_t *mddev = mddev_find(bdev->bd_dev); |
5020 | int err; | 5188 | int err; |
5021 | 5189 | ||
5190 | if (mddev->gendisk != bdev->bd_disk) { | ||
5191 | /* we are racing with mddev_put which is discarding this | ||
5192 | * bd_disk. | ||
5193 | */ | ||
5194 | mddev_put(mddev); | ||
5195 | /* Wait until bdev->bd_disk is definitely gone */ | ||
5196 | flush_scheduled_work(); | ||
5197 | /* Then retry the open from the top */ | ||
5198 | return -ERESTARTSYS; | ||
5199 | } | ||
5200 | BUG_ON(mddev != bdev->bd_disk->private_data); | ||
5201 | |||
5022 | if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) | 5202 | if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) |
5023 | goto out; | 5203 | goto out; |
5024 | 5204 | ||
5025 | err = 0; | 5205 | err = 0; |
5026 | mddev_get(mddev); | ||
5027 | atomic_inc(&mddev->openers); | 5206 | atomic_inc(&mddev->openers); |
5028 | mddev_unlock(mddev); | 5207 | mddev_unlock(mddev); |
5029 | 5208 | ||
@@ -5187,11 +5366,10 @@ static void status_unused(struct seq_file *seq) | |||
5187 | { | 5366 | { |
5188 | int i = 0; | 5367 | int i = 0; |
5189 | mdk_rdev_t *rdev; | 5368 | mdk_rdev_t *rdev; |
5190 | struct list_head *tmp; | ||
5191 | 5369 | ||
5192 | seq_printf(seq, "unused devices: "); | 5370 | seq_printf(seq, "unused devices: "); |
5193 | 5371 | ||
5194 | rdev_for_each_list(rdev, tmp, pending_raid_disks) { | 5372 | list_for_each_entry(rdev, &pending_raid_disks, same_set) { |
5195 | char b[BDEVNAME_SIZE]; | 5373 | char b[BDEVNAME_SIZE]; |
5196 | i++; | 5374 | i++; |
5197 | seq_printf(seq, "%s ", | 5375 | seq_printf(seq, "%s ", |
@@ -5350,7 +5528,6 @@ static int md_seq_show(struct seq_file *seq, void *v) | |||
5350 | { | 5528 | { |
5351 | mddev_t *mddev = v; | 5529 | mddev_t *mddev = v; |
5352 | sector_t size; | 5530 | sector_t size; |
5353 | struct list_head *tmp2; | ||
5354 | mdk_rdev_t *rdev; | 5531 | mdk_rdev_t *rdev; |
5355 | struct mdstat_info *mi = seq->private; | 5532 | struct mdstat_info *mi = seq->private; |
5356 | struct bitmap *bitmap; | 5533 | struct bitmap *bitmap; |
@@ -5387,7 +5564,7 @@ static int md_seq_show(struct seq_file *seq, void *v) | |||
5387 | } | 5564 | } |
5388 | 5565 | ||
5389 | size = 0; | 5566 | size = 0; |
5390 | rdev_for_each(rdev, tmp2, mddev) { | 5567 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
5391 | char b[BDEVNAME_SIZE]; | 5568 | char b[BDEVNAME_SIZE]; |
5392 | seq_printf(seq, " %s[%d]", | 5569 | seq_printf(seq, " %s[%d]", |
5393 | bdevname(rdev->bdev,b), rdev->desc_nr); | 5570 | bdevname(rdev->bdev,b), rdev->desc_nr); |
@@ -5694,7 +5871,6 @@ void md_do_sync(mddev_t *mddev) | |||
5694 | struct list_head *tmp; | 5871 | struct list_head *tmp; |
5695 | sector_t last_check; | 5872 | sector_t last_check; |
5696 | int skipped = 0; | 5873 | int skipped = 0; |
5697 | struct list_head *rtmp; | ||
5698 | mdk_rdev_t *rdev; | 5874 | mdk_rdev_t *rdev; |
5699 | char *desc; | 5875 | char *desc; |
5700 | 5876 | ||
@@ -5799,7 +5975,7 @@ void md_do_sync(mddev_t *mddev) | |||
5799 | /* recovery follows the physical size of devices */ | 5975 | /* recovery follows the physical size of devices */ |
5800 | max_sectors = mddev->size << 1; | 5976 | max_sectors = mddev->size << 1; |
5801 | j = MaxSector; | 5977 | j = MaxSector; |
5802 | rdev_for_each(rdev, rtmp, mddev) | 5978 | list_for_each_entry(rdev, &mddev->disks, same_set) |
5803 | if (rdev->raid_disk >= 0 && | 5979 | if (rdev->raid_disk >= 0 && |
5804 | !test_bit(Faulty, &rdev->flags) && | 5980 | !test_bit(Faulty, &rdev->flags) && |
5805 | !test_bit(In_sync, &rdev->flags) && | 5981 | !test_bit(In_sync, &rdev->flags) && |
@@ -5949,7 +6125,7 @@ void md_do_sync(mddev_t *mddev) | |||
5949 | } else { | 6125 | } else { |
5950 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | 6126 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
5951 | mddev->curr_resync = MaxSector; | 6127 | mddev->curr_resync = MaxSector; |
5952 | rdev_for_each(rdev, rtmp, mddev) | 6128 | list_for_each_entry(rdev, &mddev->disks, same_set) |
5953 | if (rdev->raid_disk >= 0 && | 6129 | if (rdev->raid_disk >= 0 && |
5954 | !test_bit(Faulty, &rdev->flags) && | 6130 | !test_bit(Faulty, &rdev->flags) && |
5955 | !test_bit(In_sync, &rdev->flags) && | 6131 | !test_bit(In_sync, &rdev->flags) && |
@@ -5985,10 +6161,9 @@ EXPORT_SYMBOL_GPL(md_do_sync); | |||
5985 | static int remove_and_add_spares(mddev_t *mddev) | 6161 | static int remove_and_add_spares(mddev_t *mddev) |
5986 | { | 6162 | { |
5987 | mdk_rdev_t *rdev; | 6163 | mdk_rdev_t *rdev; |
5988 | struct list_head *rtmp; | ||
5989 | int spares = 0; | 6164 | int spares = 0; |
5990 | 6165 | ||
5991 | rdev_for_each(rdev, rtmp, mddev) | 6166 | list_for_each_entry(rdev, &mddev->disks, same_set) |
5992 | if (rdev->raid_disk >= 0 && | 6167 | if (rdev->raid_disk >= 0 && |
5993 | !test_bit(Blocked, &rdev->flags) && | 6168 | !test_bit(Blocked, &rdev->flags) && |
5994 | (test_bit(Faulty, &rdev->flags) || | 6169 | (test_bit(Faulty, &rdev->flags) || |
@@ -6003,8 +6178,8 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
6003 | } | 6178 | } |
6004 | } | 6179 | } |
6005 | 6180 | ||
6006 | if (mddev->degraded && ! mddev->ro) { | 6181 | if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { |
6007 | rdev_for_each(rdev, rtmp, mddev) { | 6182 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
6008 | if (rdev->raid_disk >= 0 && | 6183 | if (rdev->raid_disk >= 0 && |
6009 | !test_bit(In_sync, &rdev->flags) && | 6184 | !test_bit(In_sync, &rdev->flags) && |
6010 | !test_bit(Blocked, &rdev->flags)) | 6185 | !test_bit(Blocked, &rdev->flags)) |
@@ -6056,7 +6231,6 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
6056 | void md_check_recovery(mddev_t *mddev) | 6231 | void md_check_recovery(mddev_t *mddev) |
6057 | { | 6232 | { |
6058 | mdk_rdev_t *rdev; | 6233 | mdk_rdev_t *rdev; |
6059 | struct list_head *rtmp; | ||
6060 | 6234 | ||
6061 | 6235 | ||
6062 | if (mddev->bitmap) | 6236 | if (mddev->bitmap) |
@@ -6120,7 +6294,7 @@ void md_check_recovery(mddev_t *mddev) | |||
6120 | if (mddev->flags) | 6294 | if (mddev->flags) |
6121 | md_update_sb(mddev, 0); | 6295 | md_update_sb(mddev, 0); |
6122 | 6296 | ||
6123 | rdev_for_each(rdev, rtmp, mddev) | 6297 | list_for_each_entry(rdev, &mddev->disks, same_set) |
6124 | if (test_and_clear_bit(StateChanged, &rdev->flags)) | 6298 | if (test_and_clear_bit(StateChanged, &rdev->flags)) |
6125 | sysfs_notify_dirent(rdev->sysfs_state); | 6299 | sysfs_notify_dirent(rdev->sysfs_state); |
6126 | 6300 | ||
@@ -6149,13 +6323,13 @@ void md_check_recovery(mddev_t *mddev) | |||
6149 | * information must be scrapped | 6323 | * information must be scrapped |
6150 | */ | 6324 | */ |
6151 | if (!mddev->degraded) | 6325 | if (!mddev->degraded) |
6152 | rdev_for_each(rdev, rtmp, mddev) | 6326 | list_for_each_entry(rdev, &mddev->disks, same_set) |
6153 | rdev->saved_raid_disk = -1; | 6327 | rdev->saved_raid_disk = -1; |
6154 | 6328 | ||
6155 | mddev->recovery = 0; | 6329 | mddev->recovery = 0; |
6156 | /* flag recovery needed just to double check */ | 6330 | /* flag recovery needed just to double check */ |
6157 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 6331 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
6158 | sysfs_notify(&mddev->kobj, NULL, "sync_action"); | 6332 | sysfs_notify_dirent(mddev->sysfs_action); |
6159 | md_new_event(mddev); | 6333 | md_new_event(mddev); |
6160 | goto unlock; | 6334 | goto unlock; |
6161 | } | 6335 | } |
@@ -6216,7 +6390,7 @@ void md_check_recovery(mddev_t *mddev) | |||
6216 | mddev->recovery = 0; | 6390 | mddev->recovery = 0; |
6217 | } else | 6391 | } else |
6218 | md_wakeup_thread(mddev->sync_thread); | 6392 | md_wakeup_thread(mddev->sync_thread); |
6219 | sysfs_notify(&mddev->kobj, NULL, "sync_action"); | 6393 | sysfs_notify_dirent(mddev->sysfs_action); |
6220 | md_new_event(mddev); | 6394 | md_new_event(mddev); |
6221 | } | 6395 | } |
6222 | unlock: | 6396 | unlock: |
@@ -6224,7 +6398,8 @@ void md_check_recovery(mddev_t *mddev) | |||
6224 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 6398 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
6225 | if (test_and_clear_bit(MD_RECOVERY_RECOVER, | 6399 | if (test_and_clear_bit(MD_RECOVERY_RECOVER, |
6226 | &mddev->recovery)) | 6400 | &mddev->recovery)) |
6227 | sysfs_notify(&mddev->kobj, NULL, "sync_action"); | 6401 | if (mddev->sysfs_action) |
6402 | sysfs_notify_dirent(mddev->sysfs_action); | ||
6228 | } | 6403 | } |
6229 | mddev_unlock(mddev); | 6404 | mddev_unlock(mddev); |
6230 | } | 6405 | } |
@@ -6386,14 +6561,8 @@ static __exit void md_exit(void) | |||
6386 | unregister_sysctl_table(raid_table_header); | 6561 | unregister_sysctl_table(raid_table_header); |
6387 | remove_proc_entry("mdstat", NULL); | 6562 | remove_proc_entry("mdstat", NULL); |
6388 | for_each_mddev(mddev, tmp) { | 6563 | for_each_mddev(mddev, tmp) { |
6389 | struct gendisk *disk = mddev->gendisk; | ||
6390 | if (!disk) | ||
6391 | continue; | ||
6392 | export_array(mddev); | 6564 | export_array(mddev); |
6393 | del_gendisk(disk); | 6565 | mddev->hold_active = 0; |
6394 | put_disk(disk); | ||
6395 | mddev->gendisk = NULL; | ||
6396 | mddev_put(mddev); | ||
6397 | } | 6566 | } |
6398 | } | 6567 | } |
6399 | 6568 | ||
@@ -6418,6 +6587,7 @@ static int set_ro(const char *val, struct kernel_param *kp) | |||
6418 | module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); | 6587 | module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); |
6419 | module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); | 6588 | module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); |
6420 | 6589 | ||
6590 | module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); | ||
6421 | 6591 | ||
6422 | EXPORT_SYMBOL(register_md_personality); | 6592 | EXPORT_SYMBOL(register_md_personality); |
6423 | EXPORT_SYMBOL(unregister_md_personality); | 6593 | EXPORT_SYMBOL(unregister_md_personality); |
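module_param_call() with a NULL getter makes new_array a write-only parameter backed by add_named_array(). Assuming the standard sysfs layout for module parameters (the path below is an assumption, not shown in the patch), creating a named array would look roughly like this:

	/*
	 * Illustrative usage, path assumed from the usual module-parameter
	 * sysfs layout:
	 *
	 *     echo md_home > /sys/module/md_mod/parameters/new_array
	 *
	 * The write reaches add_named_array("md_home\n", kp), which trims
	 * the trailing newline, rejects anything that does not start with
	 * "md_", and calls md_alloc(0, "md_home") to create a gendisk with
	 * that name and a free minor number.
	 */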
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d4ac47d11279..f6d08f241671 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -408,7 +408,6 @@ static int multipath_run (mddev_t *mddev) | |||
408 | int disk_idx; | 408 | int disk_idx; |
409 | struct multipath_info *disk; | 409 | struct multipath_info *disk; |
410 | mdk_rdev_t *rdev; | 410 | mdk_rdev_t *rdev; |
411 | struct list_head *tmp; | ||
412 | 411 | ||
413 | if (mddev->level != LEVEL_MULTIPATH) { | 412 | if (mddev->level != LEVEL_MULTIPATH) { |
414 | printk("multipath: %s: raid level not set to multipath IO (%d)\n", | 413 | printk("multipath: %s: raid level not set to multipath IO (%d)\n", |
@@ -441,7 +440,7 @@ static int multipath_run (mddev_t *mddev) | |||
441 | } | 440 | } |
442 | 441 | ||
443 | conf->working_disks = 0; | 442 | conf->working_disks = 0; |
444 | rdev_for_each(rdev, tmp, mddev) { | 443 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
445 | disk_idx = rdev->raid_disk; | 444 | disk_idx = rdev->raid_disk; |
446 | if (disk_idx < 0 || | 445 | if (disk_idx < 0 || |
447 | disk_idx >= mddev->raid_disks) | 446 | disk_idx >= mddev->raid_disks) |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 8ac6488ad0dc..c605ba805586 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -53,11 +53,10 @@ static int raid0_congested(void *data, int bits) | |||
53 | static int create_strip_zones (mddev_t *mddev) | 53 | static int create_strip_zones (mddev_t *mddev) |
54 | { | 54 | { |
55 | int i, c, j; | 55 | int i, c, j; |
56 | sector_t current_offset, curr_zone_offset; | 56 | sector_t current_start, curr_zone_start; |
57 | sector_t min_spacing; | 57 | sector_t min_spacing; |
58 | raid0_conf_t *conf = mddev_to_conf(mddev); | 58 | raid0_conf_t *conf = mddev_to_conf(mddev); |
59 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; | 59 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; |
60 | struct list_head *tmp1, *tmp2; | ||
61 | struct strip_zone *zone; | 60 | struct strip_zone *zone; |
62 | int cnt; | 61 | int cnt; |
63 | char b[BDEVNAME_SIZE]; | 62 | char b[BDEVNAME_SIZE]; |
@@ -67,19 +66,19 @@ static int create_strip_zones (mddev_t *mddev) | |||
67 | */ | 66 | */ |
68 | conf->nr_strip_zones = 0; | 67 | conf->nr_strip_zones = 0; |
69 | 68 | ||
70 | rdev_for_each(rdev1, tmp1, mddev) { | 69 | list_for_each_entry(rdev1, &mddev->disks, same_set) { |
71 | printk("raid0: looking at %s\n", | 70 | printk(KERN_INFO "raid0: looking at %s\n", |
72 | bdevname(rdev1->bdev,b)); | 71 | bdevname(rdev1->bdev,b)); |
73 | c = 0; | 72 | c = 0; |
74 | rdev_for_each(rdev2, tmp2, mddev) { | 73 | list_for_each_entry(rdev2, &mddev->disks, same_set) { |
75 | printk("raid0: comparing %s(%llu)", | 74 | printk(KERN_INFO "raid0: comparing %s(%llu)", |
76 | bdevname(rdev1->bdev,b), | 75 | bdevname(rdev1->bdev,b), |
77 | (unsigned long long)rdev1->size); | 76 | (unsigned long long)rdev1->size); |
78 | printk(" with %s(%llu)\n", | 77 | printk(KERN_INFO " with %s(%llu)\n", |
79 | bdevname(rdev2->bdev,b), | 78 | bdevname(rdev2->bdev,b), |
80 | (unsigned long long)rdev2->size); | 79 | (unsigned long long)rdev2->size); |
81 | if (rdev2 == rdev1) { | 80 | if (rdev2 == rdev1) { |
82 | printk("raid0: END\n"); | 81 | printk(KERN_INFO "raid0: END\n"); |
83 | break; | 82 | break; |
84 | } | 83 | } |
85 | if (rdev2->size == rdev1->size) | 84 | if (rdev2->size == rdev1->size) |
@@ -88,19 +87,20 @@ static int create_strip_zones (mddev_t *mddev) | |||
88 | * Not unique, don't count it as a new | 87 | * Not unique, don't count it as a new |
89 | * group | 88 | * group |
90 | */ | 89 | */ |
91 | printk("raid0: EQUAL\n"); | 90 | printk(KERN_INFO "raid0: EQUAL\n"); |
92 | c = 1; | 91 | c = 1; |
93 | break; | 92 | break; |
94 | } | 93 | } |
95 | printk("raid0: NOT EQUAL\n"); | 94 | printk(KERN_INFO "raid0: NOT EQUAL\n"); |
96 | } | 95 | } |
97 | if (!c) { | 96 | if (!c) { |
98 | printk("raid0: ==> UNIQUE\n"); | 97 | printk(KERN_INFO "raid0: ==> UNIQUE\n"); |
99 | conf->nr_strip_zones++; | 98 | conf->nr_strip_zones++; |
100 | printk("raid0: %d zones\n", conf->nr_strip_zones); | 99 | printk(KERN_INFO "raid0: %d zones\n", |
100 | conf->nr_strip_zones); | ||
101 | } | 101 | } |
102 | } | 102 | } |
103 | printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); | 103 | printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); |
104 | 104 | ||
105 | conf->strip_zone = kzalloc(sizeof(struct strip_zone)* | 105 | conf->strip_zone = kzalloc(sizeof(struct strip_zone)* |
106 | conf->nr_strip_zones, GFP_KERNEL); | 106 | conf->nr_strip_zones, GFP_KERNEL); |
@@ -119,16 +119,17 @@ static int create_strip_zones (mddev_t *mddev) | |||
119 | cnt = 0; | 119 | cnt = 0; |
120 | smallest = NULL; | 120 | smallest = NULL; |
121 | zone->dev = conf->devlist; | 121 | zone->dev = conf->devlist; |
122 | rdev_for_each(rdev1, tmp1, mddev) { | 122 | list_for_each_entry(rdev1, &mddev->disks, same_set) { |
123 | int j = rdev1->raid_disk; | 123 | int j = rdev1->raid_disk; |
124 | 124 | ||
125 | if (j < 0 || j >= mddev->raid_disks) { | 125 | if (j < 0 || j >= mddev->raid_disks) { |
126 | printk("raid0: bad disk number %d - aborting!\n", j); | 126 | printk(KERN_ERR "raid0: bad disk number %d - " |
127 | "aborting!\n", j); | ||
127 | goto abort; | 128 | goto abort; |
128 | } | 129 | } |
129 | if (zone->dev[j]) { | 130 | if (zone->dev[j]) { |
130 | printk("raid0: multiple devices for %d - aborting!\n", | 131 | printk(KERN_ERR "raid0: multiple devices for %d - " |
131 | j); | 132 | "aborting!\n", j); |
132 | goto abort; | 133 | goto abort; |
133 | } | 134 | } |
134 | zone->dev[j] = rdev1; | 135 | zone->dev[j] = rdev1; |
@@ -149,16 +150,16 @@ static int create_strip_zones (mddev_t *mddev) | |||
149 | cnt++; | 150 | cnt++; |
150 | } | 151 | } |
151 | if (cnt != mddev->raid_disks) { | 152 | if (cnt != mddev->raid_disks) { |
152 | printk("raid0: too few disks (%d of %d) - aborting!\n", | 153 | printk(KERN_ERR "raid0: too few disks (%d of %d) - " |
153 | cnt, mddev->raid_disks); | 154 | "aborting!\n", cnt, mddev->raid_disks); |
154 | goto abort; | 155 | goto abort; |
155 | } | 156 | } |
156 | zone->nb_dev = cnt; | 157 | zone->nb_dev = cnt; |
157 | zone->size = smallest->size * cnt; | 158 | zone->sectors = smallest->size * cnt * 2; |
158 | zone->zone_offset = 0; | 159 | zone->zone_start = 0; |
159 | 160 | ||
160 | current_offset = smallest->size; | 161 | current_start = smallest->size * 2; |
161 | curr_zone_offset = zone->size; | 162 | curr_zone_start = zone->sectors; |
162 | 163 | ||
163 | /* now do the other zones */ | 164 | /* now do the other zones */ |
164 | for (i = 1; i < conf->nr_strip_zones; i++) | 165 | for (i = 1; i < conf->nr_strip_zones; i++) |
@@ -166,40 +167,41 @@ static int create_strip_zones (mddev_t *mddev) | |||
166 | zone = conf->strip_zone + i; | 167 | zone = conf->strip_zone + i; |
167 | zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; | 168 | zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; |
168 | 169 | ||
169 | printk("raid0: zone %d\n", i); | 170 | printk(KERN_INFO "raid0: zone %d\n", i); |
170 | zone->dev_offset = current_offset; | 171 | zone->dev_start = current_start; |
171 | smallest = NULL; | 172 | smallest = NULL; |
172 | c = 0; | 173 | c = 0; |
173 | 174 | ||
174 | for (j=0; j<cnt; j++) { | 175 | for (j=0; j<cnt; j++) { |
175 | char b[BDEVNAME_SIZE]; | 176 | char b[BDEVNAME_SIZE]; |
176 | rdev = conf->strip_zone[0].dev[j]; | 177 | rdev = conf->strip_zone[0].dev[j]; |
177 | printk("raid0: checking %s ...", bdevname(rdev->bdev,b)); | 178 | printk(KERN_INFO "raid0: checking %s ...", |
178 | if (rdev->size > current_offset) | 179 | bdevname(rdev->bdev, b)); |
179 | { | 180 | if (rdev->size > current_start / 2) { |
180 | printk(" contained as device %d\n", c); | 181 | printk(KERN_INFO " contained as device %d\n", |
182 | c); | ||
181 | zone->dev[c] = rdev; | 183 | zone->dev[c] = rdev; |
182 | c++; | 184 | c++; |
183 | if (!smallest || (rdev->size <smallest->size)) { | 185 | if (!smallest || (rdev->size <smallest->size)) { |
184 | smallest = rdev; | 186 | smallest = rdev; |
185 | printk(" (%llu) is smallest!.\n", | 187 | printk(KERN_INFO " (%llu) is smallest!.\n", |
186 | (unsigned long long)rdev->size); | 188 | (unsigned long long)rdev->size); |
187 | } | 189 | } |
188 | } else | 190 | } else |
189 | printk(" nope.\n"); | 191 | printk(KERN_INFO " nope.\n"); |
190 | } | 192 | } |
191 | 193 | ||
192 | zone->nb_dev = c; | 194 | zone->nb_dev = c; |
193 | zone->size = (smallest->size - current_offset) * c; | 195 | zone->sectors = (smallest->size * 2 - current_start) * c; |
194 | printk("raid0: zone->nb_dev: %d, size: %llu\n", | 196 | printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", |
195 | zone->nb_dev, (unsigned long long)zone->size); | 197 | zone->nb_dev, (unsigned long long)zone->sectors); |
196 | 198 | ||
197 | zone->zone_offset = curr_zone_offset; | 199 | zone->zone_start = curr_zone_start; |
198 | curr_zone_offset += zone->size; | 200 | curr_zone_start += zone->sectors; |
199 | 201 | ||
200 | current_offset = smallest->size; | 202 | current_start = smallest->size * 2; |
201 | printk("raid0: current zone offset: %llu\n", | 203 | printk(KERN_INFO "raid0: current zone start: %llu\n", |
202 | (unsigned long long)current_offset); | 204 | (unsigned long long)current_start); |
203 | } | 205 | } |
204 | 206 | ||
205 | /* Now find appropriate hash spacing. | 207 | /* Now find appropriate hash spacing. |
@@ -210,16 +212,16 @@ static int create_strip_zones (mddev_t *mddev) | |||
210 | * strip though as its size has no bearing on the efficacy of the hash | 212 | * strip though as its size has no bearing on the efficacy of the hash |
211 | * table. | 213 | * table. |
212 | */ | 214 | */ |
213 | conf->hash_spacing = curr_zone_offset; | 215 | conf->spacing = curr_zone_start; |
214 | min_spacing = curr_zone_offset; | 216 | min_spacing = curr_zone_start; |
215 | sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); | 217 | sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); |
216 | for (i=0; i < conf->nr_strip_zones-1; i++) { | 218 | for (i=0; i < conf->nr_strip_zones-1; i++) { |
217 | sector_t sz = 0; | 219 | sector_t s = 0; |
218 | for (j=i; j<conf->nr_strip_zones-1 && | 220 | for (j = i; j < conf->nr_strip_zones - 1 && |
219 | sz < min_spacing ; j++) | 221 | s < min_spacing; j++) |
220 | sz += conf->strip_zone[j].size; | 222 | s += conf->strip_zone[j].sectors; |
221 | if (sz >= min_spacing && sz < conf->hash_spacing) | 223 | if (s >= min_spacing && s < conf->spacing) |
222 | conf->hash_spacing = sz; | 224 | conf->spacing = s; |
223 | } | 225 | } |
224 | 226 | ||
225 | mddev->queue->unplug_fn = raid0_unplug; | 227 | mddev->queue->unplug_fn = raid0_unplug; |
@@ -227,7 +229,7 @@ static int create_strip_zones (mddev_t *mddev) | |||
227 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; | 229 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; |
228 | mddev->queue->backing_dev_info.congested_data = mddev; | 230 | mddev->queue->backing_dev_info.congested_data = mddev; |
229 | 231 | ||
230 | printk("raid0: done.\n"); | 232 | printk(KERN_INFO "raid0: done.\n"); |
231 | return 0; | 233 | return 0; |
232 | abort: | 234 | abort: |
233 | return 1; | 235 | return 1; |
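The switch from KB-based size/offset fields to sector-based sectors/start fields is easier to follow with numbers. A worked example under assumed member sizes (two devices; rdev->size is still counted in 1K blocks, hence the * 2 conversions):

	/*
	 * Assume rdev A with rdev->size = 100000 (KB), rdev B with
	 * rdev->size = 150000 (KB), raid_disks = 2.
	 *
	 * zone 0 (both devices, smallest = A):
	 *     sectors         = smallest->size * cnt * 2 = 100000 * 2 * 2 = 400000
	 *     zone_start      = 0
	 *     current_start   = smallest->size * 2       = 200000
	 *     curr_zone_start = 400000
	 *
	 * zone 1 (only B satisfies rdev->size > current_start / 2):
	 *     dev_start  = current_start = 200000
	 *     sectors    = (smallest->size * 2 - current_start) * c
	 *                = (150000 * 2 - 200000) * 1 = 100000
	 *     zone_start = curr_zone_start = 400000
	 */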
@@ -262,10 +264,9 @@ static int raid0_mergeable_bvec(struct request_queue *q, | |||
262 | static int raid0_run (mddev_t *mddev) | 264 | static int raid0_run (mddev_t *mddev) |
263 | { | 265 | { |
264 | unsigned cur=0, i=0, nb_zone; | 266 | unsigned cur=0, i=0, nb_zone; |
265 | s64 size; | 267 | s64 sectors; |
266 | raid0_conf_t *conf; | 268 | raid0_conf_t *conf; |
267 | mdk_rdev_t *rdev; | 269 | mdk_rdev_t *rdev; |
268 | struct list_head *tmp; | ||
269 | 270 | ||
270 | if (mddev->chunk_size == 0) { | 271 | if (mddev->chunk_size == 0) { |
271 | printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); | 272 | printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); |
@@ -291,54 +292,54 @@ static int raid0_run (mddev_t *mddev) | |||
291 | 292 | ||
292 | /* calculate array device size */ | 293 | /* calculate array device size */ |
293 | mddev->array_sectors = 0; | 294 | mddev->array_sectors = 0; |
294 | rdev_for_each(rdev, tmp, mddev) | 295 | list_for_each_entry(rdev, &mddev->disks, same_set) |
295 | mddev->array_sectors += rdev->size * 2; | 296 | mddev->array_sectors += rdev->size * 2; |
296 | 297 | ||
297 | printk("raid0 : md_size is %llu blocks.\n", | 298 | printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", |
298 | (unsigned long long)mddev->array_sectors / 2); | 299 | (unsigned long long)mddev->array_sectors); |
299 | printk("raid0 : conf->hash_spacing is %llu blocks.\n", | 300 | printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n", |
300 | (unsigned long long)conf->hash_spacing); | 301 | (unsigned long long)conf->spacing); |
301 | { | 302 | { |
302 | sector_t s = mddev->array_sectors / 2; | 303 | sector_t s = mddev->array_sectors; |
303 | sector_t space = conf->hash_spacing; | 304 | sector_t space = conf->spacing; |
304 | int round; | 305 | int round; |
305 | conf->preshift = 0; | 306 | conf->sector_shift = 0; |
306 | if (sizeof(sector_t) > sizeof(u32)) { | 307 | if (sizeof(sector_t) > sizeof(u32)) { |
307 | /*shift down space and s so that sector_div will work */ | 308 | /*shift down space and s so that sector_div will work */ |
308 | while (space > (sector_t) (~(u32)0)) { | 309 | while (space > (sector_t) (~(u32)0)) { |
309 | s >>= 1; | 310 | s >>= 1; |
310 | space >>= 1; | 311 | space >>= 1; |
311 | s += 1; /* force round-up */ | 312 | s += 1; /* force round-up */ |
312 | conf->preshift++; | 313 | conf->sector_shift++; |
313 | } | 314 | } |
314 | } | 315 | } |
315 | round = sector_div(s, (u32)space) ? 1 : 0; | 316 | round = sector_div(s, (u32)space) ? 1 : 0; |
316 | nb_zone = s + round; | 317 | nb_zone = s + round; |
317 | } | 318 | } |
318 | printk("raid0 : nb_zone is %d.\n", nb_zone); | 319 | printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone); |
319 | 320 | ||
320 | printk("raid0 : Allocating %Zd bytes for hash.\n", | 321 | printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n", |
321 | nb_zone*sizeof(struct strip_zone*)); | 322 | nb_zone*sizeof(struct strip_zone*)); |
322 | conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); | 323 | conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); |
323 | if (!conf->hash_table) | 324 | if (!conf->hash_table) |
324 | goto out_free_conf; | 325 | goto out_free_conf; |
325 | size = conf->strip_zone[cur].size; | 326 | sectors = conf->strip_zone[cur].sectors; |
326 | 327 | ||
327 | conf->hash_table[0] = conf->strip_zone + cur; | 328 | conf->hash_table[0] = conf->strip_zone + cur; |
328 | for (i=1; i< nb_zone; i++) { | 329 | for (i=1; i< nb_zone; i++) { |
329 | while (size <= conf->hash_spacing) { | 330 | while (sectors <= conf->spacing) { |
330 | cur++; | 331 | cur++; |
331 | size += conf->strip_zone[cur].size; | 332 | sectors += conf->strip_zone[cur].sectors; |
332 | } | 333 | } |
333 | size -= conf->hash_spacing; | 334 | sectors -= conf->spacing; |
334 | conf->hash_table[i] = conf->strip_zone + cur; | 335 | conf->hash_table[i] = conf->strip_zone + cur; |
335 | } | 336 | } |
336 | if (conf->preshift) { | 337 | if (conf->sector_shift) { |
337 | conf->hash_spacing >>= conf->preshift; | 338 | conf->spacing >>= conf->sector_shift; |
338 | /* round hash_spacing up so when we divide by it, we | 339 | /* round spacing up so when we divide by it, we |
339 | * err on the side of too-low, which is safest | 340 | * err on the side of too-low, which is safest |
340 | */ | 341 | */ |
341 | conf->hash_spacing++; | 342 | conf->spacing++; |
342 | } | 343 | } |
343 | 344 | ||
344 | /* calculate the max read-ahead size. | 345 | /* calculate the max read-ahead size. |
@@ -387,12 +388,12 @@ static int raid0_stop (mddev_t *mddev) | |||
387 | static int raid0_make_request (struct request_queue *q, struct bio *bio) | 388 | static int raid0_make_request (struct request_queue *q, struct bio *bio) |
388 | { | 389 | { |
389 | mddev_t *mddev = q->queuedata; | 390 | mddev_t *mddev = q->queuedata; |
390 | unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; | 391 | unsigned int sect_in_chunk, chunksect_bits, chunk_sects; |
391 | raid0_conf_t *conf = mddev_to_conf(mddev); | 392 | raid0_conf_t *conf = mddev_to_conf(mddev); |
392 | struct strip_zone *zone; | 393 | struct strip_zone *zone; |
393 | mdk_rdev_t *tmp_dev; | 394 | mdk_rdev_t *tmp_dev; |
394 | sector_t chunk; | 395 | sector_t chunk; |
395 | sector_t block, rsect; | 396 | sector_t sector, rsect; |
396 | const int rw = bio_data_dir(bio); | 397 | const int rw = bio_data_dir(bio); |
397 | int cpu; | 398 | int cpu; |
398 | 399 | ||
@@ -407,11 +408,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) | |||
407 | bio_sectors(bio)); | 408 | bio_sectors(bio)); |
408 | part_stat_unlock(); | 409 | part_stat_unlock(); |
409 | 410 | ||
410 | chunk_size = mddev->chunk_size >> 10; | ||
411 | chunk_sects = mddev->chunk_size >> 9; | 411 | chunk_sects = mddev->chunk_size >> 9; |
412 | chunksize_bits = ffz(~chunk_size); | 412 | chunksect_bits = ffz(~chunk_sects); |
413 | block = bio->bi_sector >> 1; | 413 | sector = bio->bi_sector; |
414 | |||
415 | 414 | ||
416 | if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { | 415 | if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { |
417 | struct bio_pair *bp; | 416 | struct bio_pair *bp; |
@@ -434,28 +433,27 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) | |||
434 | 433 | ||
435 | 434 | ||
436 | { | 435 | { |
437 | sector_t x = block >> conf->preshift; | 436 | sector_t x = sector >> conf->sector_shift; |
438 | sector_div(x, (u32)conf->hash_spacing); | 437 | sector_div(x, (u32)conf->spacing); |
439 | zone = conf->hash_table[x]; | 438 | zone = conf->hash_table[x]; |
440 | } | 439 | } |
441 | 440 | ||
442 | while (block >= (zone->zone_offset + zone->size)) | 441 | while (sector >= zone->zone_start + zone->sectors) |
443 | zone++; | 442 | zone++; |
444 | 443 | ||
445 | sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1); | 444 | sect_in_chunk = bio->bi_sector & (chunk_sects - 1); |
446 | 445 | ||
447 | 446 | ||
448 | { | 447 | { |
449 | sector_t x = (block - zone->zone_offset) >> chunksize_bits; | 448 | sector_t x = (sector - zone->zone_start) >> chunksect_bits; |
450 | 449 | ||
451 | sector_div(x, zone->nb_dev); | 450 | sector_div(x, zone->nb_dev); |
452 | chunk = x; | 451 | chunk = x; |
453 | 452 | ||
454 | x = block >> chunksize_bits; | 453 | x = sector >> chunksect_bits; |
455 | tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; | 454 | tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; |
456 | } | 455 | } |
457 | rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1) | 456 | rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk; |
458 | + sect_in_chunk; | ||
459 | 457 | ||
460 | bio->bi_bdev = tmp_dev->bdev; | 458 | bio->bi_bdev = tmp_dev->bdev; |
461 | bio->bi_sector = rsect + tmp_dev->data_offset; | 459 | bio->bi_sector = rsect + tmp_dev->data_offset; |
@@ -467,7 +465,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) | |||
467 | 465 | ||
468 | bad_map: | 466 | bad_map: |
469 | printk("raid0_make_request bug: can't convert block across chunks" | 467 | printk("raid0_make_request bug: can't convert block across chunks" |
470 | " or bigger than %dk %llu %d\n", chunk_size, | 468 | " or bigger than %dk %llu %d\n", chunk_sects / 2, |
471 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); | 469 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); |
472 | 470 | ||
473 | bio_io_error(bio); | 471 | bio_io_error(bio); |
@@ -492,10 +490,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev) | |||
492 | seq_printf(seq, "%s/", bdevname( | 490 | seq_printf(seq, "%s/", bdevname( |
493 | conf->strip_zone[j].dev[k]->bdev,b)); | 491 | conf->strip_zone[j].dev[k]->bdev,b)); |
494 | 492 | ||
495 | seq_printf(seq, "] zo=%d do=%d s=%d\n", | 493 | seq_printf(seq, "] zs=%d ds=%d s=%d\n", |
496 | conf->strip_zone[j].zone_offset, | 494 | conf->strip_zone[j].zone_start, |
497 | conf->strip_zone[j].dev_offset, | 495 | conf->strip_zone[j].dev_start, |
498 | conf->strip_zone[j].size); | 496 | conf->strip_zone[j].sectors); |
499 | } | 497 | } |
500 | #endif | 498 | #endif |
501 | seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); | 499 | seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9c788e2489b1..7b4f5f7155d8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1016,12 +1016,16 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1016 | * else mark the drive as failed | 1016 | * else mark the drive as failed |
1017 | */ | 1017 | */ |
1018 | if (test_bit(In_sync, &rdev->flags) | 1018 | if (test_bit(In_sync, &rdev->flags) |
1019 | && (conf->raid_disks - mddev->degraded) == 1) | 1019 | && (conf->raid_disks - mddev->degraded) == 1) { |
1020 | /* | 1020 | /* |
1021 | * Don't fail the drive, act as though we were just a | 1021 | * Don't fail the drive, act as though we were just a |
1022 | * normal single drive | 1022 | * normal single drive. |
1023 | * However don't try a recovery from this drive as | ||
1024 | * it is very likely to fail. | ||
1023 | */ | 1025 | */ |
1026 | mddev->recovery_disabled = 1; | ||
1024 | return; | 1027 | return; |
1028 | } | ||
1025 | if (test_and_clear_bit(In_sync, &rdev->flags)) { | 1029 | if (test_and_clear_bit(In_sync, &rdev->flags)) { |
1026 | unsigned long flags; | 1030 | unsigned long flags; |
1027 | spin_lock_irqsave(&conf->device_lock, flags); | 1031 | spin_lock_irqsave(&conf->device_lock, flags); |
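The new recovery_disabled flag set here is consumed back in remove_and_add_spares() (see the md.c hunk above); a minimal sketch of the interaction:

	/* raid1 error(): the last working mirror failed, so keep acting
	 * like a plain disk but refuse to start a recovery onto it */
	mddev->recovery_disabled = 1;

	/* md.c remove_and_add_spares(): skip spare activation while the
	 * flag is set */
	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		/* ... look for spares to hot-add ... */
	}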
@@ -1919,7 +1923,6 @@ static int run(mddev_t *mddev) | |||
1919 | int i, j, disk_idx; | 1923 | int i, j, disk_idx; |
1920 | mirror_info_t *disk; | 1924 | mirror_info_t *disk; |
1921 | mdk_rdev_t *rdev; | 1925 | mdk_rdev_t *rdev; |
1922 | struct list_head *tmp; | ||
1923 | 1926 | ||
1924 | if (mddev->level != 1) { | 1927 | if (mddev->level != 1) { |
1925 | printk("raid1: %s: raid level not set to mirroring (%d)\n", | 1928 | printk("raid1: %s: raid level not set to mirroring (%d)\n", |
@@ -1964,7 +1967,7 @@ static int run(mddev_t *mddev) | |||
1964 | spin_lock_init(&conf->device_lock); | 1967 | spin_lock_init(&conf->device_lock); |
1965 | mddev->queue->queue_lock = &conf->device_lock; | 1968 | mddev->queue->queue_lock = &conf->device_lock; |
1966 | 1969 | ||
1967 | rdev_for_each(rdev, tmp, mddev) { | 1970 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
1968 | disk_idx = rdev->raid_disk; | 1971 | disk_idx = rdev->raid_disk; |
1969 | if (disk_idx >= mddev->raid_disks | 1972 | if (disk_idx >= mddev->raid_disks |
1970 | || disk_idx < 0) | 1973 | || disk_idx < 0) |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 970a96ef9b18..6736d6dff981 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2025,7 +2025,6 @@ static int run(mddev_t *mddev) | |||
2025 | int i, disk_idx; | 2025 | int i, disk_idx; |
2026 | mirror_info_t *disk; | 2026 | mirror_info_t *disk; |
2027 | mdk_rdev_t *rdev; | 2027 | mdk_rdev_t *rdev; |
2028 | struct list_head *tmp; | ||
2029 | int nc, fc, fo; | 2028 | int nc, fc, fo; |
2030 | sector_t stride, size; | 2029 | sector_t stride, size; |
2031 | 2030 | ||
@@ -2108,7 +2107,7 @@ static int run(mddev_t *mddev) | |||
2108 | spin_lock_init(&conf->device_lock); | 2107 | spin_lock_init(&conf->device_lock); |
2109 | mddev->queue->queue_lock = &conf->device_lock; | 2108 | mddev->queue->queue_lock = &conf->device_lock; |
2110 | 2109 | ||
2111 | rdev_for_each(rdev, tmp, mddev) { | 2110 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2112 | disk_idx = rdev->raid_disk; | 2111 | disk_idx = rdev->raid_disk; |
2113 | if (disk_idx >= mddev->raid_disks | 2112 | if (disk_idx >= mddev->raid_disks |
2114 | || disk_idx < 0) | 2113 | || disk_idx < 0) |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a36a7435edf5..a5ba080d303b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3998,7 +3998,6 @@ static int run(mddev_t *mddev) | |||
3998 | int raid_disk, memory; | 3998 | int raid_disk, memory; |
3999 | mdk_rdev_t *rdev; | 3999 | mdk_rdev_t *rdev; |
4000 | struct disk_info *disk; | 4000 | struct disk_info *disk; |
4001 | struct list_head *tmp; | ||
4002 | int working_disks = 0; | 4001 | int working_disks = 0; |
4003 | 4002 | ||
4004 | if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { | 4003 | if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { |
@@ -4108,7 +4107,7 @@ static int run(mddev_t *mddev) | |||
4108 | 4107 | ||
4109 | pr_debug("raid5: run(%s) called.\n", mdname(mddev)); | 4108 | pr_debug("raid5: run(%s) called.\n", mdname(mddev)); |
4110 | 4109 | ||
4111 | rdev_for_each(rdev, tmp, mddev) { | 4110 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
4112 | raid_disk = rdev->raid_disk; | 4111 | raid_disk = rdev->raid_disk; |
4113 | if (raid_disk >= conf->raid_disks | 4112 | if (raid_disk >= conf->raid_disks |
4114 | || raid_disk < 0) | 4113 | || raid_disk < 0) |
@@ -4533,7 +4532,6 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4533 | { | 4532 | { |
4534 | raid5_conf_t *conf = mddev_to_conf(mddev); | 4533 | raid5_conf_t *conf = mddev_to_conf(mddev); |
4535 | mdk_rdev_t *rdev; | 4534 | mdk_rdev_t *rdev; |
4536 | struct list_head *rtmp; | ||
4537 | int spares = 0; | 4535 | int spares = 0; |
4538 | int added_devices = 0; | 4536 | int added_devices = 0; |
4539 | unsigned long flags; | 4537 | unsigned long flags; |
@@ -4541,7 +4539,7 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4541 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 4539 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
4542 | return -EBUSY; | 4540 | return -EBUSY; |
4543 | 4541 | ||
4544 | rdev_for_each(rdev, rtmp, mddev) | 4542 | list_for_each_entry(rdev, &mddev->disks, same_set) |
4545 | if (rdev->raid_disk < 0 && | 4543 | if (rdev->raid_disk < 0 && |
4546 | !test_bit(Faulty, &rdev->flags)) | 4544 | !test_bit(Faulty, &rdev->flags)) |
4547 | spares++; | 4545 | spares++; |
@@ -4563,7 +4561,7 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4563 | /* Add some new drives, as many as will fit. | 4561 | /* Add some new drives, as many as will fit. |
4564 | * We know there are enough to make the newly sized array work. | 4562 | * We know there are enough to make the newly sized array work. |
4565 | */ | 4563 | */ |
4566 | rdev_for_each(rdev, rtmp, mddev) | 4564 | list_for_each_entry(rdev, &mddev->disks, same_set) |
4567 | if (rdev->raid_disk < 0 && | 4565 | if (rdev->raid_disk < 0 && |
4568 | !test_bit(Faulty, &rdev->flags)) { | 4566 | !test_bit(Faulty, &rdev->flags)) { |
4569 | if (raid5_add_disk(mddev, rdev) == 0) { | 4567 | if (raid5_add_disk(mddev, rdev) == 0) { |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index fee7304102af..3949a1c73451 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -498,6 +498,18 @@ config SGI_GRU_DEBUG | |||
498 | This option enables additional debugging code for the SGI GRU driver. If | 498 | This option enables additional debugging code for the SGI GRU driver. If |
499 | you are unsure, say N. | 499 | you are unsure, say N. |
500 | 500 | ||
501 | config DELL_LAPTOP | ||
502 | tristate "Dell Laptop Extras (EXPERIMENTAL)" | ||
503 | depends on X86 | ||
504 | depends on DCDBAS | ||
505 | depends on EXPERIMENTAL | ||
506 | depends on BACKLIGHT_CLASS_DEVICE | ||
507 | depends on RFKILL | ||
508 | default n | ||
509 | ---help--- | ||
510 | This driver adds support for rfkill and backlight control to Dell | ||
511 | laptops. | ||
512 | |||
501 | source "drivers/misc/c2port/Kconfig" | 513 | source "drivers/misc/c2port/Kconfig" |
502 | 514 | ||
503 | endif # MISC_DEVICES | 515 | endif # MISC_DEVICES |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 817f7f5ab3bd..5de863a0e395 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -18,6 +18,7 @@ obj-$(CONFIG_ICS932S401) += ics932s401.o | |||
18 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | 18 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o |
19 | obj-$(CONFIG_LKDTM) += lkdtm.o | 19 | obj-$(CONFIG_LKDTM) += lkdtm.o |
20 | obj-$(CONFIG_TIFM_CORE) += tifm_core.o | 20 | obj-$(CONFIG_TIFM_CORE) += tifm_core.o |
21 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | ||
21 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o | 22 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o |
22 | obj-$(CONFIG_PHANTOM) += phantom.o | 23 | obj-$(CONFIG_PHANTOM) += phantom.o |
23 | obj-$(CONFIG_SGI_IOC4) += ioc4.o | 24 | obj-$(CONFIG_SGI_IOC4) += ioc4.o |
diff --git a/drivers/misc/dell-laptop.c b/drivers/misc/dell-laptop.c new file mode 100644 index 000000000000..4d33a2068b7a --- /dev/null +++ b/drivers/misc/dell-laptop.c | |||
@@ -0,0 +1,436 @@ | |||
1 | /* | ||
2 | * Driver for Dell laptop extras | ||
3 | * | ||
4 | * Copyright (c) Red Hat <mjg@redhat.com> | ||
5 | * | ||
6 | * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell | ||
7 | * Inc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/backlight.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/dmi.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/rfkill.h> | ||
23 | #include <linux/power_supply.h> | ||
24 | #include <linux/acpi.h> | ||
25 | #include "../firmware/dcdbas.h" | ||
26 | |||
27 | #define BRIGHTNESS_TOKEN 0x7d | ||
28 | |||
29 | /* This structure will be modified by the firmware when we enter | ||
30 | * system management mode, hence the volatiles */ | ||
31 | |||
32 | struct calling_interface_buffer { | ||
33 | u16 class; | ||
34 | u16 select; | ||
35 | volatile u32 input[4]; | ||
36 | volatile u32 output[4]; | ||
37 | } __packed; | ||
38 | |||
39 | struct calling_interface_token { | ||
40 | u16 tokenID; | ||
41 | u16 location; | ||
42 | union { | ||
43 | u16 value; | ||
44 | u16 stringlength; | ||
45 | }; | ||
46 | }; | ||
47 | |||
48 | struct calling_interface_structure { | ||
49 | struct dmi_header header; | ||
50 | u16 cmdIOAddress; | ||
51 | u8 cmdIOCode; | ||
52 | u32 supportedCmds; | ||
53 | struct calling_interface_token tokens[]; | ||
54 | } __packed; | ||
55 | |||
56 | static int da_command_address; | ||
57 | static int da_command_code; | ||
58 | static int da_num_tokens; | ||
59 | static struct calling_interface_token *da_tokens; | ||
60 | |||
61 | static struct backlight_device *dell_backlight_device; | ||
62 | static struct rfkill *wifi_rfkill; | ||
63 | static struct rfkill *bluetooth_rfkill; | ||
64 | static struct rfkill *wwan_rfkill; | ||
65 | |||
66 | static const struct dmi_system_id __initdata dell_device_table[] = { | ||
67 | { | ||
68 | .ident = "Dell laptop", | ||
69 | .matches = { | ||
70 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
71 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), | ||
72 | }, | ||
73 | }, | ||
74 | { } | ||
75 | }; | ||
76 | |||
77 | static void parse_da_table(const struct dmi_header *dm) | ||
78 | { | ||
79 | /* Final token is a terminator, so we don't want to copy it */ | ||
80 | int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; | ||
81 | struct calling_interface_structure *table = | ||
82 | container_of(dm, struct calling_interface_structure, header); | ||
83 | |||
84 | /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least | ||
85 | 6 bytes of entry */ | ||
86 | |||
87 | if (dm->length < 17) | ||
88 | return; | ||
89 | |||
90 | da_command_address = table->cmdIOAddress; | ||
91 | da_command_code = table->cmdIOCode; | ||
92 | |||
93 | da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * | ||
94 | sizeof(struct calling_interface_token), | ||
95 | GFP_KERNEL); | ||
96 | |||
97 | if (!da_tokens) | ||
98 | return; | ||
99 | |||
100 | memcpy(da_tokens+da_num_tokens, table->tokens, | ||
101 | sizeof(struct calling_interface_token) * tokens); | ||
102 | |||
103 | da_num_tokens += tokens; | ||
104 | } | ||
105 | |||
106 | static void find_tokens(const struct dmi_header *dm) | ||
107 | { | ||
108 | switch (dm->type) { | ||
109 | case 0xd4: /* Indexed IO */ | ||
110 | break; | ||
111 | case 0xd5: /* Protected Area Type 1 */ | ||
112 | break; | ||
113 | case 0xd6: /* Protected Area Type 2 */ | ||
114 | break; | ||
115 | case 0xda: /* Calling interface */ | ||
116 | parse_da_table(dm); | ||
117 | break; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | static int find_token_location(int tokenid) | ||
122 | { | ||
123 | int i; | ||
124 | for (i = 0; i < da_num_tokens; i++) { | ||
125 | if (da_tokens[i].tokenID == tokenid) | ||
126 | return da_tokens[i].location; | ||
127 | } | ||
128 | |||
129 | return -1; | ||
130 | } | ||
131 | |||
132 | static struct calling_interface_buffer * | ||
133 | dell_send_request(struct calling_interface_buffer *buffer, int class, | ||
134 | int select) | ||
135 | { | ||
136 | struct smi_cmd command; | ||
137 | |||
138 | command.magic = SMI_CMD_MAGIC; | ||
139 | command.command_address = da_command_address; | ||
140 | command.command_code = da_command_code; | ||
141 | command.ebx = virt_to_phys(buffer); | ||
142 | command.ecx = 0x42534931; | ||
143 | |||
144 | buffer->class = class; | ||
145 | buffer->select = select; | ||
146 | |||
147 | dcdbas_smi_request(&command); | ||
148 | |||
149 | return buffer; | ||
150 | } | ||
151 | |||
152 | /* Derived from information in DellWirelessCtl.cpp: | ||
153 | Class 17, select 11 is radio control. It returns an array of 32-bit values. | ||
154 | |||
155 | result[0]: return code | ||
156 | result[1]: | ||
157 | Bit 0: Hardware switch supported | ||
158 | Bit 1: Wifi locator supported | ||
159 | Bit 2: Wifi is supported | ||
160 | Bit 3: Bluetooth is supported | ||
161 | Bit 4: WWAN is supported | ||
162 | Bit 5: Wireless keyboard supported | ||
163 | Bits 6-7: Reserved | ||
164 | Bit 8: Wifi is installed | ||
165 | Bit 9: Bluetooth is installed | ||
166 | Bit 10: WWAN is installed | ||
167 | Bits 11-15: Reserved | ||
168 | Bit 16: Hardware switch is on | ||
169 | Bit 17: Wifi is blocked | ||
170 | Bit 18: Bluetooth is blocked | ||
171 | Bit 19: WWAN is blocked | ||
172 | Bits 20-31: Reserved | ||
173 | result[2]: NVRAM size in bytes | ||
174 | result[3]: NVRAM format version number | ||
175 | */ | ||
176 | |||
177 | static int dell_rfkill_set(int radio, enum rfkill_state state) | ||
178 | { | ||
179 | struct calling_interface_buffer buffer; | ||
180 | int disable = (state == RFKILL_STATE_UNBLOCKED) ? 0 : 1; | ||
181 | |||
182 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
183 | buffer.input[0] = (1 | (radio<<8) | (disable << 16)); | ||
184 | dell_send_request(&buffer, 17, 11); | ||
185 | |||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int dell_wifi_set(void *data, enum rfkill_state state) | ||
190 | { | ||
191 | return dell_rfkill_set(1, state); | ||
192 | } | ||
193 | |||
194 | static int dell_bluetooth_set(void *data, enum rfkill_state state) | ||
195 | { | ||
196 | return dell_rfkill_set(2, state); | ||
197 | } | ||
198 | |||
199 | static int dell_wwan_set(void *data, enum rfkill_state state) | ||
200 | { | ||
201 | return dell_rfkill_set(3, state); | ||
202 | } | ||
203 | |||
204 | static int dell_rfkill_get(int bit, enum rfkill_state *state) | ||
205 | { | ||
206 | struct calling_interface_buffer buffer; | ||
207 | int status; | ||
208 | int new_state = RFKILL_STATE_HARD_BLOCKED; | ||
209 | |||
210 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
211 | dell_send_request(&buffer, 17, 11); | ||
212 | status = buffer.output[1]; | ||
213 | |||
214 | if (status & (1<<16)) | ||
215 | new_state = RFKILL_STATE_SOFT_BLOCKED; | ||
216 | |||
217 | if (status & (1<<bit)) | ||
218 | *state = new_state; | ||
219 | else | ||
220 | *state = RFKILL_STATE_UNBLOCKED; | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static int dell_wifi_get(void *data, enum rfkill_state *state) | ||
226 | { | ||
227 | return dell_rfkill_get(17, state); | ||
228 | } | ||
229 | |||
230 | static int dell_bluetooth_get(void *data, enum rfkill_state *state) | ||
231 | { | ||
232 | return dell_rfkill_get(18, state); | ||
233 | } | ||
234 | |||
235 | static int dell_wwan_get(void *data, enum rfkill_state *state) | ||
236 | { | ||
237 | return dell_rfkill_get(19, state); | ||
238 | } | ||
239 | |||
240 | static int dell_setup_rfkill(void) | ||
241 | { | ||
242 | struct calling_interface_buffer buffer; | ||
243 | int status; | ||
244 | int ret; | ||
245 | |||
246 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
247 | dell_send_request(&buffer, 17, 11); | ||
248 | status = buffer.output[1]; | ||
249 | |||
250 | if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { | ||
251 | wifi_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WLAN); | ||
252 | if (!wifi_rfkill) | ||
253 | goto err_wifi; | ||
254 | wifi_rfkill->name = "dell-wifi"; | ||
255 | wifi_rfkill->toggle_radio = dell_wifi_set; | ||
256 | wifi_rfkill->get_state = dell_wifi_get; | ||
257 | ret = rfkill_register(wifi_rfkill); | ||
258 | if (ret) | ||
259 | goto err_wifi; | ||
260 | } | ||
261 | |||
262 | if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) { | ||
263 | bluetooth_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_BLUETOOTH); | ||
264 | if (!bluetooth_rfkill) | ||
265 | goto err_bluetooth; | ||
266 | bluetooth_rfkill->name = "dell-bluetooth"; | ||
267 | bluetooth_rfkill->toggle_radio = dell_bluetooth_set; | ||
268 | bluetooth_rfkill->get_state = dell_bluetooth_get; | ||
269 | ret = rfkill_register(bluetooth_rfkill); | ||
270 | if (ret) | ||
271 | goto err_bluetooth; | ||
272 | } | ||
273 | |||
274 | if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) { | ||
275 | wwan_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WWAN); | ||
276 | if (!wwan_rfkill) | ||
277 | goto err_wwan; | ||
278 | wwan_rfkill->name = "dell-wwan"; | ||
279 | wwan_rfkill->toggle_radio = dell_wwan_set; | ||
280 | wwan_rfkill->get_state = dell_wwan_get; | ||
281 | ret = rfkill_register(wwan_rfkill); | ||
282 | if (ret) | ||
283 | goto err_wwan; | ||
284 | } | ||
285 | |||
286 | return 0; | ||
287 | err_wwan: | ||
288 | if (wwan_rfkill) | ||
289 | rfkill_free(wwan_rfkill); | ||
290 | if (bluetooth_rfkill) { | ||
291 | rfkill_unregister(bluetooth_rfkill); | ||
292 | bluetooth_rfkill = NULL; | ||
293 | } | ||
294 | err_bluetooth: | ||
295 | if (bluetooth_rfkill) | ||
296 | rfkill_free(bluetooth_rfkill); | ||
297 | if (wifi_rfkill) { | ||
298 | rfkill_unregister(wifi_rfkill); | ||
299 | wifi_rfkill = NULL; | ||
300 | } | ||
301 | err_wifi: | ||
302 | if (wifi_rfkill) | ||
303 | rfkill_free(wifi_rfkill); | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | static int dell_send_intensity(struct backlight_device *bd) | ||
309 | { | ||
310 | struct calling_interface_buffer buffer; | ||
311 | |||
312 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
313 | buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); | ||
314 | buffer.input[1] = bd->props.brightness; | ||
315 | |||
316 | if (buffer.input[0] == -1) | ||
317 | return -ENODEV; | ||
318 | |||
319 | if (power_supply_is_system_supplied() > 0) | ||
320 | dell_send_request(&buffer, 1, 2); | ||
321 | else | ||
322 | dell_send_request(&buffer, 1, 1); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int dell_get_intensity(struct backlight_device *bd) | ||
328 | { | ||
329 | struct calling_interface_buffer buffer; | ||
330 | |||
331 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
332 | buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); | ||
333 | |||
334 | if (buffer.input[0] == -1) | ||
335 | return -ENODEV; | ||
336 | |||
337 | if (power_supply_is_system_supplied() > 0) | ||
338 | dell_send_request(&buffer, 0, 2); | ||
339 | else | ||
340 | dell_send_request(&buffer, 0, 1); | ||
341 | |||
342 | return buffer.output[1]; | ||
343 | } | ||
344 | |||
345 | static struct backlight_ops dell_ops = { | ||
346 | .get_brightness = dell_get_intensity, | ||
347 | .update_status = dell_send_intensity, | ||
348 | }; | ||
349 | |||
350 | static int __init dell_init(void) | ||
351 | { | ||
352 | struct calling_interface_buffer buffer; | ||
353 | int max_intensity = 0; | ||
354 | int ret; | ||
355 | |||
356 | if (!dmi_check_system(dell_device_table)) | ||
357 | return -ENODEV; | ||
358 | |||
359 | dmi_walk(find_tokens); | ||
360 | |||
361 | if (!da_tokens) { | ||
362 | printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n"); | ||
363 | return -ENODEV; | ||
364 | } | ||
365 | |||
366 | ret = dell_setup_rfkill(); | ||
367 | |||
368 | if (ret) { | ||
369 | printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n"); | ||
370 | goto out; | ||
371 | } | ||
372 | |||
373 | #ifdef CONFIG_ACPI | ||
374 | /* In the event of an ACPI backlight being available, don't | ||
375 | * register the platform controller. | ||
376 | */ | ||
377 | if (acpi_video_backlight_support()) | ||
378 | return 0; | ||
379 | #endif | ||
380 | |||
381 | memset(&buffer, 0, sizeof(struct calling_interface_buffer)); | ||
382 | buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); | ||
383 | |||
384 | if (buffer.input[0] != -1) { | ||
385 | dell_send_request(&buffer, 0, 2); | ||
386 | max_intensity = buffer.output[3]; | ||
387 | } | ||
388 | |||
389 | if (max_intensity) { | ||
390 | dell_backlight_device = backlight_device_register( | ||
391 | "dell_backlight", | ||
392 | NULL, NULL, | ||
393 | &dell_ops); | ||
394 | |||
395 | if (IS_ERR(dell_backlight_device)) { | ||
396 | ret = PTR_ERR(dell_backlight_device); | ||
397 | dell_backlight_device = NULL; | ||
398 | goto out; | ||
399 | } | ||
400 | |||
401 | dell_backlight_device->props.max_brightness = max_intensity; | ||
402 | dell_backlight_device->props.brightness = | ||
403 | dell_get_intensity(dell_backlight_device); | ||
404 | backlight_update_status(dell_backlight_device); | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | out: | ||
409 | if (wifi_rfkill) | ||
410 | rfkill_unregister(wifi_rfkill); | ||
411 | if (bluetooth_rfkill) | ||
412 | rfkill_unregister(bluetooth_rfkill); | ||
413 | if (wwan_rfkill) | ||
414 | rfkill_unregister(wwan_rfkill); | ||
415 | kfree(da_tokens); | ||
416 | return ret; | ||
417 | } | ||
418 | |||
419 | static void __exit dell_exit(void) | ||
420 | { | ||
421 | backlight_device_unregister(dell_backlight_device); | ||
422 | if (wifi_rfkill) | ||
423 | rfkill_unregister(wifi_rfkill); | ||
424 | if (bluetooth_rfkill) | ||
425 | rfkill_unregister(bluetooth_rfkill); | ||
426 | if (wwan_rfkill) | ||
427 | rfkill_unregister(wwan_rfkill); | ||
428 | } | ||
429 | |||
430 | module_init(dell_init); | ||
431 | module_exit(dell_exit); | ||
432 | |||
433 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | ||
434 | MODULE_DESCRIPTION("Dell laptop driver"); | ||
435 | MODULE_LICENSE("GPL"); | ||
436 | MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*"); | ||
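The comment block in dell-laptop.c above documents the bit layout of result[1] returned by the class 17, select 11 SMI call, and dell_rfkill_get() tests those bits to report rfkill state: a blocked radio is reported as soft-blocked when bit 16 says the hardware switch is on, and hard-blocked otherwise. The sketch below only decodes that documented layout in stand-alone user-space C as an illustration; the names "dell_radio_status" and "decode_radio_status" and the sample status value are made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decoded view of the bits documented for result[1]. */
struct dell_radio_status {
	bool hw_switch_on;	/* bit 16 */
	bool wifi_blocked;	/* bit 17 */
	bool bt_blocked;	/* bit 18 */
	bool wwan_blocked;	/* bit 19 */
};

static struct dell_radio_status decode_radio_status(uint32_t status)
{
	struct dell_radio_status s = {
		.hw_switch_on = status & (1u << 16),
		.wifi_blocked = status & (1u << 17),
		.bt_blocked   = status & (1u << 18),
		.wwan_blocked = status & (1u << 19),
	};
	return s;
}

int main(void)
{
	/* sample value: wifi supported+installed, hw switch on, wifi blocked */
	struct dell_radio_status s = decode_radio_status(0x00030104);

	printf("wifi blocked: %d, hw switch on: %d\n",
	       s.wifi_blocked, s.hw_switch_on);
	return 0;
}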
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6fde0a2e3567..bc33200535fc 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig | |||
@@ -120,6 +120,13 @@ config MTD_PHRAM | |||
120 | doesn't have access to, memory beyond the mem=xxx limit, nvram, | 120 | doesn't have access to, memory beyond the mem=xxx limit, nvram, |
121 | memory on the video card, etc... | 121 | memory on the video card, etc... |
122 | 122 | ||
123 | config MTD_PS3VRAM | ||
124 | tristate "PS3 video RAM" | ||
125 | depends on FB_PS3 | ||
126 | help | ||
127 | This driver allows you to use excess PS3 video RAM as volatile | ||
128 | storage or system swap. | ||
129 | |||
123 | config MTD_LART | 130 | config MTD_LART |
124 | tristate "28F160xx flash driver for LART" | 131 | tristate "28F160xx flash driver for LART" |
125 | depends on SA1100_LART | 132 | depends on SA1100_LART |
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 0993d5cf3923..e51521df4e40 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
@@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o | |||
16 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o | 16 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o |
17 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o | 17 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o |
18 | obj-$(CONFIG_MTD_M25P80) += m25p80.o | 18 | obj-$(CONFIG_MTD_M25P80) += m25p80.o |
19 | obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o | ||
diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c new file mode 100644 index 000000000000..d21e9beb7ed2 --- /dev/null +++ b/drivers/mtd/devices/ps3vram.c | |||
@@ -0,0 +1,768 @@ | |||
1 | /** | ||
2 | * ps3vram - Use extra PS3 video ram as MTD block device. | ||
3 | * | ||
4 | * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> | ||
5 | * Added RSX DMA support: Vivien Chappelier <vivien.chappelier@free.fr> | ||
6 | */ | ||
7 | |||
8 | #include <linux/io.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/version.h> | ||
17 | #include <linux/gfp.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/mtd/mtd.h> | ||
20 | |||
21 | #include <asm/lv1call.h> | ||
22 | #include <asm/ps3.h> | ||
23 | |||
24 | #define DEVICE_NAME "ps3vram" | ||
25 | |||
26 | #define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ | ||
27 | #define XDR_IOIF 0x0c000000 | ||
28 | |||
29 | #define FIFO_BASE XDR_IOIF | ||
30 | #define FIFO_SIZE (64 * 1024) | ||
31 | |||
32 | #define DMA_PAGE_SIZE (4 * 1024) | ||
33 | |||
34 | #define CACHE_PAGE_SIZE (256 * 1024) | ||
35 | #define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) | ||
36 | |||
37 | #define CACHE_OFFSET CACHE_PAGE_SIZE | ||
38 | #define FIFO_OFFSET 0 | ||
39 | |||
40 | #define CTRL_PUT 0x10 | ||
41 | #define CTRL_GET 0x11 | ||
42 | #define CTRL_TOP 0x15 | ||
43 | |||
44 | #define UPLOAD_SUBCH 1 | ||
45 | #define DOWNLOAD_SUBCH 2 | ||
46 | |||
47 | #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c | ||
48 | #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 | ||
49 | |||
50 | #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 | ||
51 | |||
52 | struct mtd_info ps3vram_mtd; | ||
53 | |||
54 | #define CACHE_PAGE_PRESENT 1 | ||
55 | #define CACHE_PAGE_DIRTY 2 | ||
56 | |||
57 | struct ps3vram_tag { | ||
58 | unsigned int address; | ||
59 | unsigned int flags; | ||
60 | }; | ||
61 | |||
62 | struct ps3vram_cache { | ||
63 | unsigned int page_count; | ||
64 | unsigned int page_size; | ||
65 | struct ps3vram_tag *tags; | ||
66 | }; | ||
67 | |||
68 | struct ps3vram_priv { | ||
69 | u64 memory_handle; | ||
70 | u64 context_handle; | ||
71 | u32 *ctrl; | ||
72 | u32 *reports; | ||
73 | u8 __iomem *ddr_base; | ||
74 | u8 *xdr_buf; | ||
75 | |||
76 | u32 *fifo_base; | ||
77 | u32 *fifo_ptr; | ||
78 | |||
79 | struct device *dev; | ||
80 | struct ps3vram_cache cache; | ||
81 | |||
82 | /* Used to serialize cache/DMA operations */ | ||
83 | struct mutex lock; | ||
84 | }; | ||
85 | |||
86 | #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ | ||
87 | #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ | ||
88 | #define DMA_NOTIFIER_SIZE 0x40 | ||
89 | #define NOTIFIER 7 /* notifier used for completion report */ | ||
90 | |||
91 | /* A trailing '-' means to subtract off ps3fb_videomemory.size */ | ||
92 | char *size = "256M-"; | ||
93 | module_param(size, charp, 0); | ||
94 | MODULE_PARM_DESC(size, "memory size"); | ||
95 | |||
96 | static u32 *ps3vram_get_notifier(u32 *reports, int notifier) | ||
97 | { | ||
98 | return (void *) reports + | ||
99 | DMA_NOTIFIER_OFFSET_BASE + | ||
100 | DMA_NOTIFIER_SIZE * notifier; | ||
101 | } | ||
102 | |||
103 | static void ps3vram_notifier_reset(struct mtd_info *mtd) | ||
104 | { | ||
105 | int i; | ||
106 | |||
107 | struct ps3vram_priv *priv = mtd->priv; | ||
108 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
109 | for (i = 0; i < 4; i++) | ||
110 | notify[i] = 0xffffffff; | ||
111 | } | ||
112 | |||
113 | static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms) | ||
114 | { | ||
115 | struct ps3vram_priv *priv = mtd->priv; | ||
116 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
117 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
118 | |||
119 | do { | ||
120 | if (!notify[3]) | ||
121 | return 0; | ||
122 | msleep(1); | ||
123 | } while (time_before(jiffies, timeout)); | ||
124 | |||
125 | return -ETIMEDOUT; | ||
126 | } | ||
127 | |||
128 | static void ps3vram_init_ring(struct mtd_info *mtd) | ||
129 | { | ||
130 | struct ps3vram_priv *priv = mtd->priv; | ||
131 | |||
132 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
133 | priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; | ||
134 | } | ||
135 | |||
136 | static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms) | ||
137 | { | ||
138 | struct ps3vram_priv *priv = mtd->priv; | ||
139 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
140 | |||
141 | do { | ||
142 | if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) | ||
143 | return 0; | ||
144 | msleep(1); | ||
145 | } while (time_before(jiffies, timeout)); | ||
146 | |||
147 | dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__, | ||
148 | __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], | ||
149 | priv->ctrl[CTRL_TOP]); | ||
150 | |||
151 | return -ETIMEDOUT; | ||
152 | } | ||
153 | |||
154 | static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) | ||
155 | { | ||
156 | *(priv->fifo_ptr)++ = data; | ||
157 | } | ||
158 | |||
159 | static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, | ||
160 | u32 tag, u32 size) | ||
161 | { | ||
162 | ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); | ||
163 | } | ||
164 | |||
165 | static void ps3vram_rewind_ring(struct mtd_info *mtd) | ||
166 | { | ||
167 | struct ps3vram_priv *priv = mtd->priv; | ||
168 | u64 status; | ||
169 | |||
170 | ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); | ||
171 | |||
172 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
173 | |||
174 | /* asking the HV for a blit will kick the fifo */ | ||
175 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
176 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, | ||
177 | 0, 0, 0, 0); | ||
178 | if (status) | ||
179 | dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", | ||
180 | __func__, __LINE__); | ||
181 | |||
182 | priv->fifo_ptr = priv->fifo_base; | ||
183 | } | ||
184 | |||
185 | static void ps3vram_fire_ring(struct mtd_info *mtd) | ||
186 | { | ||
187 | struct ps3vram_priv *priv = mtd->priv; | ||
188 | u64 status; | ||
189 | |||
190 | mutex_lock(&ps3_gpu_mutex); | ||
191 | |||
192 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + | ||
193 | (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); | ||
194 | |||
195 | /* asking the HV for a blit will kick the fifo */ | ||
196 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
197 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, | ||
198 | 0, 0, 0, 0); | ||
199 | if (status) | ||
200 | dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", | ||
201 | __func__, __LINE__); | ||
202 | |||
203 | if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > | ||
204 | FIFO_SIZE - 1024) { | ||
205 | dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__, | ||
206 | __LINE__); | ||
207 | ps3vram_wait_ring(mtd, 200); | ||
208 | ps3vram_rewind_ring(mtd); | ||
209 | } | ||
210 | |||
211 | mutex_unlock(&ps3_gpu_mutex); | ||
212 | } | ||
213 | |||
214 | static void ps3vram_bind(struct mtd_info *mtd) | ||
215 | { | ||
216 | struct ps3vram_priv *priv = mtd->priv; | ||
217 | |||
218 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); | ||
219 | ps3vram_out_ring(priv, 0x31337303); | ||
220 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); | ||
221 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
222 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
223 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
224 | |||
225 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); | ||
226 | ps3vram_out_ring(priv, 0x3137c0de); | ||
227 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); | ||
228 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
229 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
230 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
231 | |||
232 | ps3vram_fire_ring(mtd); | ||
233 | } | ||
234 | |||
235 | static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset, | ||
236 | unsigned int dst_offset, int len, int count) | ||
237 | { | ||
238 | struct ps3vram_priv *priv = mtd->priv; | ||
239 | |||
240 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
241 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
242 | ps3vram_out_ring(priv, XDR_IOIF + src_offset); | ||
243 | ps3vram_out_ring(priv, dst_offset); | ||
244 | ps3vram_out_ring(priv, len); | ||
245 | ps3vram_out_ring(priv, len); | ||
246 | ps3vram_out_ring(priv, len); | ||
247 | ps3vram_out_ring(priv, count); | ||
248 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
249 | ps3vram_out_ring(priv, 0); | ||
250 | |||
251 | ps3vram_notifier_reset(mtd); | ||
252 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
253 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
254 | ps3vram_out_ring(priv, 0); | ||
255 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); | ||
256 | ps3vram_out_ring(priv, 0); | ||
257 | ps3vram_fire_ring(mtd); | ||
258 | if (ps3vram_notifier_wait(mtd, 200) < 0) { | ||
259 | dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, | ||
260 | __LINE__); | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset, | ||
268 | unsigned int dst_offset, int len, int count) | ||
269 | { | ||
270 | struct ps3vram_priv *priv = mtd->priv; | ||
271 | |||
272 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
273 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
274 | ps3vram_out_ring(priv, src_offset); | ||
275 | ps3vram_out_ring(priv, XDR_IOIF + dst_offset); | ||
276 | ps3vram_out_ring(priv, len); | ||
277 | ps3vram_out_ring(priv, len); | ||
278 | ps3vram_out_ring(priv, len); | ||
279 | ps3vram_out_ring(priv, count); | ||
280 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
281 | ps3vram_out_ring(priv, 0); | ||
282 | |||
283 | ps3vram_notifier_reset(mtd); | ||
284 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
285 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
286 | ps3vram_out_ring(priv, 0); | ||
287 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); | ||
288 | ps3vram_out_ring(priv, 0); | ||
289 | ps3vram_fire_ring(mtd); | ||
290 | if (ps3vram_notifier_wait(mtd, 200) < 0) { | ||
291 | dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, | ||
292 | __LINE__); | ||
293 | return -1; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static void ps3vram_cache_evict(struct mtd_info *mtd, int entry) | ||
300 | { | ||
301 | struct ps3vram_priv *priv = mtd->priv; | ||
302 | struct ps3vram_cache *cache = &priv->cache; | ||
303 | |||
304 | if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) { | ||
305 | dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__, | ||
306 | __LINE__, entry, cache->tags[entry].address); | ||
307 | if (ps3vram_upload(mtd, | ||
308 | CACHE_OFFSET + entry * cache->page_size, | ||
309 | cache->tags[entry].address, | ||
310 | DMA_PAGE_SIZE, | ||
311 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
312 | dev_dbg(priv->dev, "%s:%d: failed to upload from " | ||
313 | "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, | ||
314 | entry * cache->page_size, | ||
315 | cache->tags[entry].address, cache->page_size); | ||
316 | } | ||
317 | cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | static void ps3vram_cache_load(struct mtd_info *mtd, int entry, | ||
322 | unsigned int address) | ||
323 | { | ||
324 | struct ps3vram_priv *priv = mtd->priv; | ||
325 | struct ps3vram_cache *cache = &priv->cache; | ||
326 | |||
327 | dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__, | ||
328 | entry, address); | ||
329 | if (ps3vram_download(mtd, | ||
330 | address, | ||
331 | CACHE_OFFSET + entry * cache->page_size, | ||
332 | DMA_PAGE_SIZE, | ||
333 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
334 | dev_err(priv->dev, "%s:%d: failed to download from " | ||
335 | "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address, | ||
336 | entry * cache->page_size, cache->page_size); | ||
337 | } | ||
338 | |||
339 | cache->tags[entry].address = address; | ||
340 | cache->tags[entry].flags |= CACHE_PAGE_PRESENT; | ||
341 | } | ||
342 | |||
343 | |||
344 | static void ps3vram_cache_flush(struct mtd_info *mtd) | ||
345 | { | ||
346 | struct ps3vram_priv *priv = mtd->priv; | ||
347 | struct ps3vram_cache *cache = &priv->cache; | ||
348 | int i; | ||
349 | |||
350 | dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__); | ||
351 | for (i = 0; i < cache->page_count; i++) { | ||
352 | ps3vram_cache_evict(mtd, i); | ||
353 | cache->tags[i].flags = 0; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address) | ||
358 | { | ||
359 | struct ps3vram_priv *priv = mtd->priv; | ||
360 | struct ps3vram_cache *cache = &priv->cache; | ||
361 | unsigned int base; | ||
362 | unsigned int offset; | ||
363 | int i; | ||
364 | static int counter; | ||
365 | |||
366 | offset = (unsigned int) (address & (cache->page_size - 1)); | ||
367 | base = (unsigned int) (address - offset); | ||
368 | |||
369 | /* fully associative check */ | ||
370 | for (i = 0; i < cache->page_count; i++) { | ||
371 | if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && | ||
372 | cache->tags[i].address == base) { | ||
373 | dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n", | ||
374 | __func__, __LINE__, i, cache->tags[i].address); | ||
375 | return i; | ||
376 | } | ||
377 | } | ||
378 | |||
379 | /* choose a random entry */ | ||
380 | i = (jiffies + (counter++)) % cache->page_count; | ||
381 | dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i); | ||
382 | |||
383 | ps3vram_cache_evict(mtd, i); | ||
384 | ps3vram_cache_load(mtd, i, base); | ||
385 | |||
386 | return i; | ||
387 | } | ||
388 | |||
389 | static int ps3vram_cache_init(struct mtd_info *mtd) | ||
390 | { | ||
391 | struct ps3vram_priv *priv = mtd->priv; | ||
392 | |||
393 | priv->cache.page_count = CACHE_PAGE_COUNT; | ||
394 | priv->cache.page_size = CACHE_PAGE_SIZE; | ||
395 | priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * | ||
396 | CACHE_PAGE_COUNT, GFP_KERNEL); | ||
397 | if (priv->cache.tags == NULL) { | ||
398 | dev_err(priv->dev, "%s:%d: could not allocate cache tags\n", | ||
399 | __func__, __LINE__); | ||
400 | return -ENOMEM; | ||
401 | } | ||
402 | |||
403 | dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n", | ||
404 | CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static void ps3vram_cache_cleanup(struct mtd_info *mtd) | ||
410 | { | ||
411 | struct ps3vram_priv *priv = mtd->priv; | ||
412 | |||
413 | ps3vram_cache_flush(mtd); | ||
414 | kfree(priv->cache.tags); | ||
415 | } | ||
416 | |||
417 | static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
418 | { | ||
419 | struct ps3vram_priv *priv = mtd->priv; | ||
420 | |||
421 | if (instr->addr + instr->len > mtd->size) | ||
422 | return -EINVAL; | ||
423 | |||
424 | mutex_lock(&priv->lock); | ||
425 | |||
426 | ps3vram_cache_flush(mtd); | ||
427 | |||
428 | /* Set bytes to 0xFF */ | ||
429 | memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len); | ||
430 | |||
431 | mutex_unlock(&priv->lock); | ||
432 | |||
433 | instr->state = MTD_ERASE_DONE; | ||
434 | mtd_erase_callback(instr); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
440 | size_t *retlen, u_char *buf) | ||
441 | { | ||
442 | struct ps3vram_priv *priv = mtd->priv; | ||
443 | unsigned int cached, count; | ||
444 | |||
445 | dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__, | ||
446 | (unsigned int)from, len); | ||
447 | |||
448 | if (from >= mtd->size) | ||
449 | return -EINVAL; | ||
450 | |||
451 | if (len > mtd->size - from) | ||
452 | len = mtd->size - from; | ||
453 | |||
454 | /* Copy from vram to buf */ | ||
455 | count = len; | ||
456 | while (count) { | ||
457 | unsigned int offset, avail; | ||
458 | unsigned int entry; | ||
459 | |||
460 | offset = (unsigned int) (from & (priv->cache.page_size - 1)); | ||
461 | avail = priv->cache.page_size - offset; | ||
462 | |||
463 | mutex_lock(&priv->lock); | ||
464 | |||
465 | entry = ps3vram_cache_match(mtd, from); | ||
466 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
467 | |||
468 | dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x " | ||
469 | "avail=%08x count=%08x\n", __func__, __LINE__, | ||
470 | (unsigned int)from, cached, offset, avail, count); | ||
471 | |||
472 | if (avail > count) | ||
473 | avail = count; | ||
474 | memcpy(buf, priv->xdr_buf + cached, avail); | ||
475 | |||
476 | mutex_unlock(&priv->lock); | ||
477 | |||
478 | buf += avail; | ||
479 | count -= avail; | ||
480 | from += avail; | ||
481 | } | ||
482 | |||
483 | *retlen = len; | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
488 | size_t *retlen, const u_char *buf) | ||
489 | { | ||
490 | struct ps3vram_priv *priv = mtd->priv; | ||
491 | unsigned int cached, count; | ||
492 | |||
493 | if (to >= mtd->size) | ||
494 | return -EINVAL; | ||
495 | |||
496 | if (len > mtd->size - to) | ||
497 | len = mtd->size - to; | ||
498 | |||
499 | /* Copy from buf to vram */ | ||
500 | count = len; | ||
501 | while (count) { | ||
502 | unsigned int offset, avail; | ||
503 | unsigned int entry; | ||
504 | |||
505 | offset = (unsigned int) (to & (priv->cache.page_size - 1)); | ||
506 | avail = priv->cache.page_size - offset; | ||
507 | |||
508 | mutex_lock(&priv->lock); | ||
509 | |||
510 | entry = ps3vram_cache_match(mtd, to); | ||
511 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
512 | |||
513 | dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x " | ||
514 | "avail=%08x count=%08x\n", __func__, __LINE__, | ||
515 | (unsigned int)to, cached, offset, avail, count); | ||
516 | |||
517 | if (avail > count) | ||
518 | avail = count; | ||
519 | memcpy(priv->xdr_buf + cached, buf, avail); | ||
520 | |||
521 | priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; | ||
522 | |||
523 | mutex_unlock(&priv->lock); | ||
524 | |||
525 | buf += avail; | ||
526 | count -= avail; | ||
527 | to += avail; | ||
528 | } | ||
529 | |||
530 | *retlen = len; | ||
531 | return 0; | ||
532 | } | ||
533 | |||
534 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | ||
535 | { | ||
536 | struct ps3vram_priv *priv; | ||
537 | int status; | ||
538 | u64 ddr_lpar; | ||
539 | u64 ctrl_lpar; | ||
540 | u64 info_lpar; | ||
541 | u64 reports_lpar; | ||
542 | u64 ddr_size; | ||
543 | u64 reports_size; | ||
544 | int ret = -ENOMEM; | ||
545 | char *rest; | ||
546 | |||
547 | ret = -EIO; | ||
548 | ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL); | ||
549 | if (!ps3vram_mtd.priv) | ||
550 | goto out; | ||
551 | priv = ps3vram_mtd.priv; | ||
552 | |||
553 | mutex_init(&priv->lock); | ||
554 | priv->dev = &dev->core; | ||
555 | |||
556 | /* Allocate XDR buffer (1MiB aligned) */ | ||
557 | priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, | ||
558 | get_order(XDR_BUF_SIZE)); | ||
559 | if (priv->xdr_buf == NULL) { | ||
560 | dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n", | ||
561 | __func__, __LINE__); | ||
562 | ret = -ENOMEM; | ||
563 | goto out_free_priv; | ||
564 | } | ||
565 | |||
566 | /* Put FIFO at beginning of XDR buffer */ | ||
567 | priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); | ||
568 | priv->fifo_ptr = priv->fifo_base; | ||
569 | |||
570 | /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ | ||
571 | if (ps3_open_hv_device(dev)) { | ||
572 | dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n", | ||
573 | __func__, __LINE__); | ||
574 | ret = -EAGAIN; | ||
575 | goto out_close_gpu; | ||
576 | } | ||
577 | |||
578 | /* Request memory */ | ||
579 | status = -1; | ||
580 | ddr_size = memparse(size, &rest); | ||
581 | if (*rest == '-') | ||
582 | ddr_size -= ps3fb_videomemory.size; | ||
583 | ddr_size = ALIGN(ddr_size, 1024*1024); | ||
584 | if (ddr_size <= 0) { | ||
585 | dev_err(&dev->core, "%s:%d: specified size is too small\n", | ||
586 | __func__, __LINE__); | ||
587 | ret = -EINVAL; | ||
588 | goto out_close_gpu; | ||
589 | } | ||
590 | |||
591 | while (ddr_size > 0) { | ||
592 | status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, | ||
593 | &priv->memory_handle, | ||
594 | &ddr_lpar); | ||
595 | if (!status) | ||
596 | break; | ||
597 | ddr_size -= 1024*1024; | ||
598 | } | ||
599 | if (status || ddr_size <= 0) { | ||
600 | dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n", | ||
601 | __func__, __LINE__); | ||
602 | ret = -ENOMEM; | ||
603 | goto out_free_xdr_buf; | ||
604 | } | ||
605 | |||
606 | /* Request context */ | ||
607 | status = lv1_gpu_context_allocate(priv->memory_handle, | ||
608 | 0, | ||
609 | &priv->context_handle, | ||
610 | &ctrl_lpar, | ||
611 | &info_lpar, | ||
612 | &reports_lpar, | ||
613 | &reports_size); | ||
614 | if (status) { | ||
615 | dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n", | ||
616 | __func__, __LINE__); | ||
617 | ret = -ENOMEM; | ||
618 | goto out_free_memory; | ||
619 | } | ||
620 | |||
621 | /* Map XDR buffer to RSX */ | ||
622 | status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, | ||
623 | ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), | ||
624 | XDR_BUF_SIZE, 0); | ||
625 | if (status) { | ||
626 | dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n", | ||
627 | __func__, __LINE__); | ||
628 | ret = -ENOMEM; | ||
629 | goto out_free_context; | ||
630 | } | ||
631 | |||
632 | priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); | ||
633 | |||
634 | if (!priv->ddr_base) { | ||
635 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
636 | __LINE__); | ||
637 | ret = -ENOMEM; | ||
638 | goto out_free_context; | ||
639 | } | ||
640 | |||
641 | priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); | ||
642 | if (!priv->ctrl) { | ||
643 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
644 | __LINE__); | ||
645 | ret = -ENOMEM; | ||
646 | goto out_unmap_vram; | ||
647 | } | ||
648 | |||
649 | priv->reports = ioremap(reports_lpar, reports_size); | ||
650 | if (!priv->reports) { | ||
651 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
652 | __LINE__); | ||
653 | ret = -ENOMEM; | ||
654 | goto out_unmap_ctrl; | ||
655 | } | ||
656 | |||
657 | mutex_lock(&ps3_gpu_mutex); | ||
658 | ps3vram_init_ring(&ps3vram_mtd); | ||
659 | mutex_unlock(&ps3_gpu_mutex); | ||
660 | |||
661 | ps3vram_mtd.name = "ps3vram"; | ||
662 | ps3vram_mtd.size = ddr_size; | ||
663 | ps3vram_mtd.flags = MTD_CAP_RAM; | ||
664 | ps3vram_mtd.erase = ps3vram_erase; | ||
665 | ps3vram_mtd.point = NULL; | ||
666 | ps3vram_mtd.unpoint = NULL; | ||
667 | ps3vram_mtd.read = ps3vram_read; | ||
668 | ps3vram_mtd.write = ps3vram_write; | ||
669 | ps3vram_mtd.owner = THIS_MODULE; | ||
670 | ps3vram_mtd.type = MTD_RAM; | ||
671 | ps3vram_mtd.erasesize = CACHE_PAGE_SIZE; | ||
672 | ps3vram_mtd.writesize = 1; | ||
673 | |||
674 | ps3vram_bind(&ps3vram_mtd); | ||
675 | |||
676 | mutex_lock(&ps3_gpu_mutex); | ||
677 | ret = ps3vram_wait_ring(&ps3vram_mtd, 100); | ||
678 | mutex_unlock(&ps3_gpu_mutex); | ||
679 | if (ret < 0) { | ||
680 | dev_err(&dev->core, "%s:%d: failed to initialize channels\n", | ||
681 | __func__, __LINE__); | ||
682 | ret = -ETIMEDOUT; | ||
683 | goto out_unmap_reports; | ||
684 | } | ||
685 | |||
686 | ps3vram_cache_init(&ps3vram_mtd); | ||
687 | |||
688 | if (add_mtd_device(&ps3vram_mtd)) { | ||
689 | dev_err(&dev->core, "%s:%d: add_mtd_device failed\n", | ||
690 | __func__, __LINE__); | ||
691 | ret = -EAGAIN; | ||
692 | goto out_cache_cleanup; | ||
693 | } | ||
694 | |||
695 | dev_info(&dev->core, "reserved %u MiB of gpu memory\n", | ||
696 | (unsigned int)(ddr_size / 1024 / 1024)); | ||
697 | |||
698 | return 0; | ||
699 | |||
700 | out_cache_cleanup: | ||
701 | ps3vram_cache_cleanup(&ps3vram_mtd); | ||
702 | out_unmap_reports: | ||
703 | iounmap(priv->reports); | ||
704 | out_unmap_ctrl: | ||
705 | iounmap(priv->ctrl); | ||
706 | out_unmap_vram: | ||
707 | iounmap(priv->ddr_base); | ||
708 | out_free_context: | ||
709 | lv1_gpu_context_free(priv->context_handle); | ||
710 | out_free_memory: | ||
711 | lv1_gpu_memory_free(priv->memory_handle); | ||
712 | out_close_gpu: | ||
713 | ps3_close_hv_device(dev); | ||
714 | out_free_xdr_buf: | ||
715 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
716 | out_free_priv: | ||
717 | kfree(ps3vram_mtd.priv); | ||
718 | ps3vram_mtd.priv = NULL; | ||
719 | out: | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static int ps3vram_shutdown(struct ps3_system_bus_device *dev) | ||
724 | { | ||
725 | struct ps3vram_priv *priv; | ||
726 | |||
727 | priv = ps3vram_mtd.priv; | ||
728 | |||
729 | del_mtd_device(&ps3vram_mtd); | ||
730 | ps3vram_cache_cleanup(&ps3vram_mtd); | ||
731 | iounmap(priv->reports); | ||
732 | iounmap(priv->ctrl); | ||
733 | iounmap(priv->ddr_base); | ||
734 | lv1_gpu_context_free(priv->context_handle); | ||
735 | lv1_gpu_memory_free(priv->memory_handle); | ||
736 | ps3_close_hv_device(dev); | ||
737 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
738 | kfree(priv); | ||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | static struct ps3_system_bus_driver ps3vram_driver = { | ||
743 | .match_id = PS3_MATCH_ID_GPU, | ||
744 | .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, | ||
745 | .core.name = DEVICE_NAME, | ||
746 | .core.owner = THIS_MODULE, | ||
747 | .probe = ps3vram_probe, | ||
748 | .remove = ps3vram_shutdown, | ||
749 | .shutdown = ps3vram_shutdown, | ||
750 | }; | ||
751 | |||
752 | static int __init ps3vram_init(void) | ||
753 | { | ||
754 | return ps3_system_bus_driver_register(&ps3vram_driver); | ||
755 | } | ||
756 | |||
757 | static void __exit ps3vram_exit(void) | ||
758 | { | ||
759 | ps3_system_bus_driver_unregister(&ps3vram_driver); | ||
760 | } | ||
761 | |||
762 | module_init(ps3vram_init); | ||
763 | module_exit(ps3vram_exit); | ||
764 | |||
765 | MODULE_LICENSE("GPL"); | ||
766 | MODULE_AUTHOR("Jim Paris <jim@jtan.com>"); | ||
767 | MODULE_DESCRIPTION("MTD driver for PS3 video RAM"); | ||
768 | MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK); | ||
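ps3vram_begin_ring() above builds each RSX FIFO command by packing the data-word count, subchannel and method offset into a single 32-bit header word, which the following data words then belong to. Here is a small stand-alone sketch of that packing using the shifts from the code; the helper name "fifo_header" and the printed sample are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Pack count (bits 18+), subchannel (bits 13-17) and method offset. */
static uint32_t fifo_header(uint32_t subch, uint32_t method, uint32_t count)
{
	return (count << 18) | (subch << 13) | method;
}

int main(void)
{
	/* e.g. 8 data words on the upload subchannel (1), method 0x30c */
	printf("0x%08x\n", fifo_header(1, 0x30c, 8));
	return 0;
}

With UPLOAD_SUBCH = 1 and method 0x30c, eight data words give the header 0x0020230c, matching what ps3vram_upload() emits before its offset and length words.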
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 5d9bcf109c13..4abbe573fa40 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap); | |||
564 | * @dtype: expected data type | 564 | * @dtype: expected data type |
565 | * | 565 | * |
566 | * This function maps an un-mapped logical eraseblock @lnum to a physical | 566 | * This function maps an un-mapped logical eraseblock @lnum to a physical |
567 | * eraseblock. This means, that after a successfull invocation of this | 567 | * eraseblock. This means, that after a successful invocation of this |
568 | * function the logical eraseblock @lnum will be empty (contain only %0xFF | 568 | * function the logical eraseblock @lnum will be empty (contain only %0xFF |
569 | * bytes) and be mapped to a physical eraseblock, even if an unclean reboot | 569 | * bytes) and be mapped to a physical eraseblock, even if an unclean reboot |
570 | * happens. | 570 | * happens. |
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c index 7e2b1a67e5da..b65b4feb2d28 100644 --- a/drivers/net/wireless/ath5k/dma.c +++ b/drivers/net/wireless/ath5k/dma.c | |||
@@ -594,7 +594,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) | |||
594 | * XXX: BMISS interrupts may occur after association. | 594 | * XXX: BMISS interrupts may occur after association. |
595 | * I found this on 5210 code but it needs testing. If this is | 595 | * I found this on 5210 code but it needs testing. If this is |
596 | * true we should disable them before assoc and re-enable them | 596 | * true we should disable them before assoc and re-enable them |
597 | * after a successfull assoc + some jiffies. | 597 | * after a successful assoc + some jiffies. |
598 | interrupt_mask &= ~AR5K_INT_BMISS; | 598 | interrupt_mask &= ~AR5K_INT_BMISS; |
599 | */ | 599 | */ |
600 | } | 600 | } |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 9caa96a13586..a611ad857983 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -287,7 +287,7 @@ static void zd_op_stop(struct ieee80211_hw *hw) | |||
287 | * @skb - a sk-buffer | 287 | * @skb - a sk-buffer |
288 | * @flags: extra flags to set in the TX status info | 288 | * @flags: extra flags to set in the TX status info |
289 | * @ackssi: ACK signal strength | 289 | * @ackssi: ACK signal strength |
290 | * @success - True for successfull transmission of the frame | 290 | * @success - True for successful transmission of the frame |
291 | * | 291 | * |
292 | * This information calls ieee80211_tx_status_irqsafe() if required by the | 292 | * This information calls ieee80211_tx_status_irqsafe() if required by the |
293 | * control information. It copies the control information into the status | 293 | * control information. It copies the control information into the status |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 162330b9d1dc..7e5155e88ac7 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -86,13 +86,11 @@ enum ds_type { | |||
86 | 86 | ||
87 | 87 | ||
88 | struct ds1307 { | 88 | struct ds1307 { |
89 | u8 reg_addr; | ||
90 | u8 regs[11]; | 89 | u8 regs[11]; |
91 | enum ds_type type; | 90 | enum ds_type type; |
92 | unsigned long flags; | 91 | unsigned long flags; |
93 | #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ | 92 | #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ |
94 | #define HAS_ALARM 1 /* bit 1 == irq claimed */ | 93 | #define HAS_ALARM 1 /* bit 1 == irq claimed */ |
95 | struct i2c_msg msg[2]; | ||
96 | struct i2c_client *client; | 94 | struct i2c_client *client; |
97 | struct rtc_device *rtc; | 95 | struct rtc_device *rtc; |
98 | struct work_struct work; | 96 | struct work_struct work; |
@@ -204,13 +202,9 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) | |||
204 | int tmp; | 202 | int tmp; |
205 | 203 | ||
206 | /* read the RTC date and time registers all at once */ | 204 | /* read the RTC date and time registers all at once */ |
207 | ds1307->reg_addr = 0; | 205 | tmp = i2c_smbus_read_i2c_block_data(ds1307->client, |
208 | ds1307->msg[1].flags = I2C_M_RD; | 206 | DS1307_REG_SECS, 7, ds1307->regs); |
209 | ds1307->msg[1].len = 7; | 207 | if (tmp != 7) { |
210 | |||
211 | tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent), | ||
212 | ds1307->msg, 2); | ||
213 | if (tmp != 2) { | ||
214 | dev_err(dev, "%s error %d\n", "read", tmp); | 208 | dev_err(dev, "%s error %d\n", "read", tmp); |
215 | return -EIO; | 209 | return -EIO; |
216 | } | 210 | } |
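The rtc-ds1307 hunks in this file convert a register-pointer write followed by a two-message i2c_transfer() into a single i2c_smbus_read_i2c_block_data() call, whose return value is the number of bytes actually transferred. A minimal sketch of that replacement pattern follows, assuming <linux/i2c.h>; "example_read_regs" and the register constants are hypothetical names, not taken from the driver.

#include <linux/errno.h>
#include <linux/i2c.h>

#define EXAMPLE_REG_BASE	0x00	/* first register to read */
#define EXAMPLE_NREGS		7	/* number of registers */

static int example_read_regs(struct i2c_client *client, u8 *regs)
{
	/* one SMBus "I2C block" read replaces the write+read i2c_transfer() */
	int ret = i2c_smbus_read_i2c_block_data(client, EXAMPLE_REG_BASE,
						EXAMPLE_NREGS, regs);
	if (ret < 0)
		return ret;		/* adapter or bus error */
	if (ret != EXAMPLE_NREGS)
		return -EIO;		/* short read */
	return 0;
}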
@@ -257,7 +251,6 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) | |||
257 | t->tm_hour, t->tm_mday, | 251 | t->tm_hour, t->tm_mday, |
258 | t->tm_mon, t->tm_year, t->tm_wday); | 252 | t->tm_mon, t->tm_year, t->tm_wday); |
259 | 253 | ||
260 | *buf++ = 0; /* first register addr */ | ||
261 | buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); | 254 | buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); |
262 | buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); | 255 | buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); |
263 | buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); | 256 | buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); |
@@ -282,23 +275,19 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) | |||
282 | break; | 275 | break; |
283 | } | 276 | } |
284 | 277 | ||
285 | ds1307->msg[1].flags = 0; | ||
286 | ds1307->msg[1].len = 8; | ||
287 | |||
288 | dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", | 278 | dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", |
289 | "write", buf[0], buf[1], buf[2], buf[3], | 279 | "write", buf[0], buf[1], buf[2], buf[3], |
290 | buf[4], buf[5], buf[6]); | 280 | buf[4], buf[5], buf[6]); |
291 | 281 | ||
292 | result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent), | 282 | result = i2c_smbus_write_i2c_block_data(ds1307->client, 0, 7, buf); |
293 | &ds1307->msg[1], 1); | 283 | if (result < 0) { |
294 | if (result != 1) { | 284 | dev_err(dev, "%s error %d\n", "write", result); |
295 | dev_err(dev, "%s error %d\n", "write", tmp); | 285 | return result; |
296 | return -EIO; | ||
297 | } | 286 | } |
298 | return 0; | 287 | return 0; |
299 | } | 288 | } |
300 | 289 | ||
301 | static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) | 290 | static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) |
302 | { | 291 | { |
303 | struct i2c_client *client = to_i2c_client(dev); | 292 | struct i2c_client *client = to_i2c_client(dev); |
304 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | 293 | struct ds1307 *ds1307 = i2c_get_clientdata(client); |
@@ -308,13 +297,9 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
308 | return -EINVAL; | 297 | return -EINVAL; |
309 | 298 | ||
310 | /* read all ALARM1, ALARM2, and status registers at once */ | 299 | /* read all ALARM1, ALARM2, and status registers at once */ |
311 | ds1307->reg_addr = DS1339_REG_ALARM1_SECS; | 300 | ret = i2c_smbus_read_i2c_block_data(client, |
312 | ds1307->msg[1].flags = I2C_M_RD; | 301 | DS1339_REG_ALARM1_SECS, 9, ds1307->regs); |
313 | ds1307->msg[1].len = 9; | 302 | if (ret != 9) { |
314 | |||
315 | ret = i2c_transfer(to_i2c_adapter(client->dev.parent), | ||
316 | ds1307->msg, 2); | ||
317 | if (ret != 2) { | ||
318 | dev_err(dev, "%s error %d\n", "alarm read", ret); | 303 | dev_err(dev, "%s error %d\n", "alarm read", ret); |
319 | return -EIO; | 304 | return -EIO; |
320 | } | 305 | } |
@@ -353,7 +338,7 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
353 | return 0; | 338 | return 0; |
354 | } | 339 | } |
355 | 340 | ||
356 | static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) | 341 | static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) |
357 | { | 342 | { |
358 | struct i2c_client *client = to_i2c_client(dev); | 343 | struct i2c_client *client = to_i2c_client(dev); |
359 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | 344 | struct ds1307 *ds1307 = i2c_get_clientdata(client); |
@@ -371,13 +356,9 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
371 | t->enabled, t->pending); | 356 | t->enabled, t->pending); |
372 | 357 | ||
373 | /* read current status of both alarms and the chip */ | 358 | /* read current status of both alarms and the chip */ |
374 | ds1307->reg_addr = DS1339_REG_ALARM1_SECS; | 359 | ret = i2c_smbus_read_i2c_block_data(client, |
375 | ds1307->msg[1].flags = I2C_M_RD; | 360 | DS1339_REG_ALARM1_SECS, 9, buf); |
376 | ds1307->msg[1].len = 9; | 361 | if (ret != 9) { |
377 | |||
378 | ret = i2c_transfer(to_i2c_adapter(client->dev.parent), | ||
379 | ds1307->msg, 2); | ||
380 | if (ret != 2) { | ||
381 | dev_err(dev, "%s error %d\n", "alarm write", ret); | 362 | dev_err(dev, "%s error %d\n", "alarm write", ret); |
382 | return -EIO; | 363 | return -EIO; |
383 | } | 364 | } |
@@ -392,7 +373,6 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
392 | ds1307->regs[6], control, status); | 373 | ds1307->regs[6], control, status); |
393 | 374 | ||
394 | /* set ALARM1, using 24 hour and day-of-month modes */ | 375 | /* set ALARM1, using 24 hour and day-of-month modes */ |
395 | *buf++ = DS1339_REG_ALARM1_SECS; /* first register addr */ | ||
396 | buf[0] = bin2bcd(t->time.tm_sec); | 376 | buf[0] = bin2bcd(t->time.tm_sec); |
397 | buf[1] = bin2bcd(t->time.tm_min); | 377 | buf[1] = bin2bcd(t->time.tm_min); |
398 | buf[2] = bin2bcd(t->time.tm_hour); | 378 | buf[2] = bin2bcd(t->time.tm_hour); |
@@ -411,14 +391,11 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
411 | } | 391 | } |
412 | buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); | 392 | buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); |
413 | 393 | ||
414 | ds1307->msg[1].flags = 0; | 394 | ret = i2c_smbus_write_i2c_block_data(client, |
415 | ds1307->msg[1].len = 10; | 395 | DS1339_REG_ALARM1_SECS, 9, buf); |
416 | 396 | if (ret < 0) { | |
417 | ret = i2c_transfer(to_i2c_adapter(client->dev.parent), | ||
418 | &ds1307->msg[1], 1); | ||
419 | if (ret != 1) { | ||
420 | dev_err(dev, "can't set alarm time\n"); | 397 | dev_err(dev, "can't set alarm time\n"); |
421 | return -EIO; | 398 | return ret; |
422 | } | 399 | } |
423 | 400 | ||
424 | return 0; | 401 | return 0; |
@@ -475,8 +452,8 @@ static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
475 | static const struct rtc_class_ops ds13xx_rtc_ops = { | 452 | static const struct rtc_class_ops ds13xx_rtc_ops = { |
476 | .read_time = ds1307_get_time, | 453 | .read_time = ds1307_get_time, |
477 | .set_time = ds1307_set_time, | 454 | .set_time = ds1307_set_time, |
478 | .read_alarm = ds1307_read_alarm, | 455 | .read_alarm = ds1337_read_alarm, |
479 | .set_alarm = ds1307_set_alarm, | 456 | .set_alarm = ds1337_set_alarm, |
480 | .ioctl = ds1307_ioctl, | 457 | .ioctl = ds1307_ioctl, |
481 | }; | 458 | }; |
482 | 459 | ||
@@ -490,7 +467,6 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr, | |||
490 | { | 467 | { |
491 | struct i2c_client *client; | 468 | struct i2c_client *client; |
492 | struct ds1307 *ds1307; | 469 | struct ds1307 *ds1307; |
493 | struct i2c_msg msg[2]; | ||
494 | int result; | 470 | int result; |
495 | 471 | ||
496 | client = kobj_to_i2c_client(kobj); | 472 | client = kobj_to_i2c_client(kobj); |
@@ -503,24 +479,10 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr, | |||
503 | if (unlikely(!count)) | 479 | if (unlikely(!count)) |
504 | return count; | 480 | return count; |
505 | 481 | ||
506 | msg[0].addr = client->addr; | 482 | result = i2c_smbus_read_i2c_block_data(client, 8 + off, count, buf); |
507 | msg[0].flags = 0; | 483 | if (result < 0) |
508 | msg[0].len = 1; | ||
509 | msg[0].buf = buf; | ||
510 | |||
511 | buf[0] = 8 + off; | ||
512 | |||
513 | msg[1].addr = client->addr; | ||
514 | msg[1].flags = I2C_M_RD; | ||
515 | msg[1].len = count; | ||
516 | msg[1].buf = buf; | ||
517 | |||
518 | result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2); | ||
519 | if (result != 2) { | ||
520 | dev_err(&client->dev, "%s error %d\n", "nvram read", result); | 484 | dev_err(&client->dev, "%s error %d\n", "nvram read", result); |
521 | return -EIO; | 485 | return result; |
522 | } | ||
523 | return count; | ||
524 | } | 486 | } |
525 | 487 | ||
526 | static ssize_t | 488 | static ssize_t |
@@ -528,8 +490,7 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr, | |||
528 | char *buf, loff_t off, size_t count) | 490 | char *buf, loff_t off, size_t count) |
529 | { | 491 | { |
530 | struct i2c_client *client; | 492 | struct i2c_client *client; |
531 | u8 buffer[NVRAM_SIZE + 1]; | 493 | int result; |
532 | int ret; | ||
533 | 494 | ||
534 | client = kobj_to_i2c_client(kobj); | 495 | client = kobj_to_i2c_client(kobj); |
535 | 496 | ||
@@ -540,11 +501,12 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr, | |||
540 | if (unlikely(!count)) | 501 | if (unlikely(!count)) |
541 | return count; | 502 | return count; |
542 | 503 | ||
543 | buffer[0] = 8 + off; | 504 | result = i2c_smbus_write_i2c_block_data(client, 8 + off, count, buf); |
544 | memcpy(buffer + 1, buf, count); | 505 | if (result < 0) { |
545 | 506 | dev_err(&client->dev, "%s error %d\n", "nvram write", result); | |
546 | ret = i2c_master_send(client, buffer, count + 1); | 507 | return result; |
547 | return (ret < 0) ? ret : (ret - 1); | 508 | } |
509 | return count; | ||
548 | } | 510 | } |
549 | 511 | ||
550 | static struct bin_attribute nvram = { | 512 | static struct bin_attribute nvram = { |
@@ -571,9 +533,11 @@ static int __devinit ds1307_probe(struct i2c_client *client, | |||
571 | const struct chip_desc *chip = &chips[id->driver_data]; | 533 | const struct chip_desc *chip = &chips[id->driver_data]; |
572 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | 534 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); |
573 | int want_irq = false; | 535 | int want_irq = false; |
536 | unsigned char *buf; | ||
574 | 537 | ||
575 | if (!i2c_check_functionality(adapter, | 538 | if (!i2c_check_functionality(adapter, |
576 | I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) | 539 | I2C_FUNC_SMBUS_WRITE_BYTE_DATA | |
540 | I2C_FUNC_SMBUS_I2C_BLOCK)) | ||
577 | return -EIO; | 541 | return -EIO; |
578 | 542 | ||
579 | if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) | 543 | if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) |
@@ -581,18 +545,8 @@ static int __devinit ds1307_probe(struct i2c_client *client, | |||
581 | 545 | ||
582 | ds1307->client = client; | 546 | ds1307->client = client; |
583 | i2c_set_clientdata(client, ds1307); | 547 | i2c_set_clientdata(client, ds1307); |
584 | |||
585 | ds1307->msg[0].addr = client->addr; | ||
586 | ds1307->msg[0].flags = 0; | ||
587 | ds1307->msg[0].len = 1; | ||
588 | ds1307->msg[0].buf = &ds1307->reg_addr; | ||
589 | |||
590 | ds1307->msg[1].addr = client->addr; | ||
591 | ds1307->msg[1].flags = I2C_M_RD; | ||
592 | ds1307->msg[1].len = sizeof(ds1307->regs); | ||
593 | ds1307->msg[1].buf = ds1307->regs; | ||
594 | |||
595 | ds1307->type = id->driver_data; | 548 | ds1307->type = id->driver_data; |
549 | buf = ds1307->regs; | ||
596 | 550 | ||
597 | switch (ds1307->type) { | 551 | switch (ds1307->type) { |
598 | case ds_1337: | 552 | case ds_1337: |
@@ -602,21 +556,15 @@ static int __devinit ds1307_probe(struct i2c_client *client, | |||
602 | INIT_WORK(&ds1307->work, ds1307_work); | 556 | INIT_WORK(&ds1307->work, ds1307_work); |
603 | want_irq = true; | 557 | want_irq = true; |
604 | } | 558 | } |
605 | |||
606 | ds1307->reg_addr = DS1337_REG_CONTROL; | ||
607 | ds1307->msg[1].len = 2; | ||
608 | |||
609 | /* get registers that the "rtc" read below won't read... */ | 559 | /* get registers that the "rtc" read below won't read... */ |
610 | tmp = i2c_transfer(adapter, ds1307->msg, 2); | 560 | tmp = i2c_smbus_read_i2c_block_data(ds1307->client, |
561 | DS1337_REG_CONTROL, 2, buf); | ||
611 | if (tmp != 2) { | 562 | if (tmp != 2) { |
612 | pr_debug("read error %d\n", tmp); | 563 | pr_debug("read error %d\n", tmp); |
613 | err = -EIO; | 564 | err = -EIO; |
614 | goto exit_free; | 565 | goto exit_free; |
615 | } | 566 | } |
616 | 567 | ||
617 | ds1307->reg_addr = 0; | ||
618 | ds1307->msg[1].len = sizeof(ds1307->regs); | ||
619 | |||
620 | /* oscillator off? turn it on, so clock can tick. */ | 568 | /* oscillator off? turn it on, so clock can tick. */ |
621 | if (ds1307->regs[0] & DS1337_BIT_nEOSC) | 569 | if (ds1307->regs[0] & DS1337_BIT_nEOSC) |
622 | ds1307->regs[0] &= ~DS1337_BIT_nEOSC; | 570 | ds1307->regs[0] &= ~DS1337_BIT_nEOSC; |
@@ -647,9 +595,8 @@ static int __devinit ds1307_probe(struct i2c_client *client, | |||
647 | 595 | ||
648 | read_rtc: | 596 | read_rtc: |
649 | /* read RTC registers */ | 597 | /* read RTC registers */ |
650 | 598 | tmp = i2c_smbus_read_i2c_block_data(ds1307->client, 0, 8, buf); | |
651 | tmp = i2c_transfer(adapter, ds1307->msg, 2); | 599 | if (tmp != 8) { |
652 | if (tmp != 2) { | ||
653 | pr_debug("read error %d\n", tmp); | 600 | pr_debug("read error %d\n", tmp); |
654 | err = -EIO; | 601 | err = -EIO; |
655 | goto exit_free; | 602 | goto exit_free; |
@@ -707,22 +654,6 @@ read_rtc: | |||
707 | break; | 654 | break; |
708 | } | 655 | } |
709 | 656 | ||
710 | tmp = ds1307->regs[DS1307_REG_SECS]; | ||
711 | tmp = bcd2bin(tmp & 0x7f); | ||
712 | if (tmp > 60) | ||
713 | goto exit_bad; | ||
714 | tmp = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f); | ||
715 | if (tmp > 60) | ||
716 | goto exit_bad; | ||
717 | |||
718 | tmp = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f); | ||
719 | if (tmp == 0 || tmp > 31) | ||
720 | goto exit_bad; | ||
721 | |||
722 | tmp = bcd2bin(ds1307->regs[DS1307_REG_MONTH] & 0x1f); | ||
723 | if (tmp == 0 || tmp > 12) | ||
724 | goto exit_bad; | ||
725 | |||
726 | tmp = ds1307->regs[DS1307_REG_HOUR]; | 657 | tmp = ds1307->regs[DS1307_REG_HOUR]; |
727 | switch (ds1307->type) { | 658 | switch (ds1307->type) { |
728 | case ds_1340: | 659 | case ds_1340: |
@@ -779,13 +710,6 @@ read_rtc: | |||
779 | 710 | ||
780 | return 0; | 711 | return 0; |
781 | 712 | ||
782 | exit_bad: | ||
783 | dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", | ||
784 | "bogus register", | ||
785 | ds1307->regs[0], ds1307->regs[1], | ||
786 | ds1307->regs[2], ds1307->regs[3], | ||
787 | ds1307->regs[4], ds1307->regs[5], | ||
788 | ds1307->regs[6]); | ||
789 | exit_irq: | 713 | exit_irq: |
790 | if (ds1307->rtc) | 714 | if (ds1307->rtc) |
791 | rtc_device_unregister(ds1307->rtc); | 715 | rtc_device_unregister(ds1307->rtc); |
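The rtc-ds1307 hunks above replace hand-built i2c_msg pairs with the SMBus I2C-block helpers, which return a byte count on a read and 0 or -errno on a write. A minimal sketch of that calling pattern follows; the register offset, length and function name are hypothetical and not taken from the driver:

    #include <linux/errno.h>
    #include <linux/i2c.h>

    /* Illustrative offset and length only -- not from the patch. */
    #define EXAMPLE_REG 0x00
    #define EXAMPLE_LEN 8

    static int example_block_io(struct i2c_client *client, u8 *regs)
    {
        int ret;

        /* One call replaces the old address-write + read i2c_msg pair. */
        ret = i2c_smbus_read_i2c_block_data(client, EXAMPLE_REG,
                                            EXAMPLE_LEN, regs);
        if (ret < 0)
            return ret;             /* negative errno from the adapter */
        if (ret != EXAMPLE_LEN)
            return -EIO;            /* short read */

        /* The write helper mirrors the read and returns 0 or -errno. */
        return i2c_smbus_write_i2c_block_data(client, EXAMPLE_REG,
                                              EXAMPLE_LEN, regs);
    }

The matching capability check is the I2C_FUNC_SMBUS_I2C_BLOCK flag that the probe hunk above now passes to i2c_check_functionality().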
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index b8f9c00633f3..d82aad5224f0 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2621,7 +2621,7 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2621 | } | 2621 | } |
2622 | } | 2622 | } |
2623 | 2623 | ||
2624 | /* double-check if current erp/cqr was successfull */ | 2624 | /* double-check if current erp/cqr was successful */ |
2625 | if ((cqr->irb.scsw.cmd.cstat == 0x00) && | 2625 | if ((cqr->irb.scsw.cmd.cstat == 0x00) && |
2626 | (cqr->irb.scsw.cmd.dstat == | 2626 | (cqr->irb.scsw.cmd.dstat == |
2627 | (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { | 2627 | (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 05a14536c369..4a39084d9c95 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -199,7 +199,7 @@ struct dasd_ccw_req { | |||
199 | #define DASD_CQR_ERROR 0x82 /* request is completed with error */ | 199 | #define DASD_CQR_ERROR 0x82 /* request is completed with error */ |
200 | #define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */ | 200 | #define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */ |
201 | #define DASD_CQR_CLEARED 0x84 /* request was cleared */ | 201 | #define DASD_CQR_CLEARED 0x84 /* request was cleared */ |
202 | #define DASD_CQR_SUCCESS 0x85 /* request was successfull */ | 202 | #define DASD_CQR_SUCCESS 0x85 /* request was successful */ |
203 | 203 | ||
204 | 204 | ||
205 | /* per dasd_ccw_req flags */ | 205 | /* per dasd_ccw_req flags */ |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 4005c44a404c..71605a179d65 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -801,7 +801,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) | |||
801 | static inline int | 801 | static inline int |
802 | tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) | 802 | tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) |
803 | { | 803 | { |
804 | DBF_EVENT(3, "Error Recovery successfull for %s\n", | 804 | DBF_EVENT(3, "Error Recovery successful for %s\n", |
805 | tape_op_verbose[request->op]); | 805 | tape_op_verbose[request->op]); |
806 | return tape_3590_done(device, request); | 806 | return tape_3590_done(device, request); |
807 | } | 807 | } |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 06b71823f399..659f8a791656 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -379,7 +379,7 @@ int cio_commit_config(struct subchannel *sch) | |||
379 | if (ccode < 0) /* -EIO if msch gets a program check. */ | 379 | if (ccode < 0) /* -EIO if msch gets a program check. */ |
380 | return ccode; | 380 | return ccode; |
381 | switch (ccode) { | 381 | switch (ccode) { |
382 | case 0: /* successfull */ | 382 | case 0: /* successful */ |
383 | if (stsch(sch->schid, &schib) || | 383 | if (stsch(sch->schid, &schib) || |
384 | !css_sch_is_valid(&schib)) | 384 | !css_sch_is_valid(&schib)) |
385 | return -ENODEV; | 385 | return -ENODEV; |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 744f928a59ea..10cb0f8726e5 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -114,7 +114,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
114 | * @count: count of buffers to examine | 114 | * @count: count of buffers to examine |
115 | * @auto_ack: automatically acknowledge buffers | 115 | * @auto_ack: automatically acknowledge buffers |
116 | * | 116 | * |
117 | * Returns the number of successfull extracted equal buffer states. | 117 | * Returns the number of successfully extracted equal buffer states. |
118 | * Stops processing if a state is different from the last buffers state. | 118 | * Stops processing if a state is different from the last buffers state. |
119 | */ | 119 | */ |
120 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 120 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index b7322976d2b7..256c7bec7bd7 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -884,6 +884,7 @@ config SCSI_IBMVSCSI | |||
884 | tristate "IBM Virtual SCSI support" | 884 | tristate "IBM Virtual SCSI support" |
885 | depends on PPC_PSERIES || PPC_ISERIES | 885 | depends on PPC_PSERIES || PPC_ISERIES |
886 | select SCSI_SRP_ATTRS | 886 | select SCSI_SRP_ATTRS |
887 | select VIOPATH if PPC_ISERIES | ||
887 | help | 888 | help |
888 | This is the IBM POWER Virtual SCSI Client | 889 | This is the IBM POWER Virtual SCSI Client |
889 | 890 | ||
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b695ab3142d8..3e525e38a5d9 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG | |||
457 | 457 | ||
458 | config SERIAL_SAMSUNG_UARTS | 458 | config SERIAL_SAMSUNG_UARTS |
459 | int | 459 | int |
460 | depends on SERIAL_SAMSUNG | 460 | depends on ARM && PLAT_S3C |
461 | default 2 if ARCH_S3C2400 | 461 | default 2 if ARCH_S3C2400 |
462 | default 4 if ARCH_S3C64XX || CPU_S3C2443 | 462 | default 4 if ARCH_S3C64XX || CPU_S3C2443 |
463 | default 3 | 463 | default 3 |
@@ -1320,13 +1320,30 @@ config SERIAL_NETX_CONSOLE | |||
1320 | config SERIAL_OF_PLATFORM | 1320 | config SERIAL_OF_PLATFORM |
1321 | tristate "Serial port on Open Firmware platform bus" | 1321 | tristate "Serial port on Open Firmware platform bus" |
1322 | depends on PPC_OF | 1322 | depends on PPC_OF |
1323 | depends on SERIAL_8250 | 1323 | depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL |
1324 | help | 1324 | help |
1325 | If you have a PowerPC based system that has serial ports | 1325 | If you have a PowerPC based system that has serial ports |
1326 | on a platform specific bus, you should enable this option. | 1326 | on a platform specific bus, you should enable this option. |
1327 | Currently, only 8250 compatible ports are supported, but | 1327 | Currently, only 8250 compatible ports are supported, but |
1328 | others can easily be added. | 1328 | others can easily be added. |
1329 | 1329 | ||
1330 | config SERIAL_OF_PLATFORM_NWPSERIAL | ||
1331 | tristate "NWP serial port driver" | ||
1332 | depends on PPC_OF && PPC_DCR | ||
1333 | select SERIAL_OF_PLATFORM | ||
1334 | select SERIAL_CORE_CONSOLE | ||
1335 | select SERIAL_CORE | ||
1336 | help | ||
1337 | This driver supports the Cell network processor NWP serial | ||
1338 | device. | ||
1339 | |||
1340 | config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE | ||
1341 | bool "Console on NWP serial port" | ||
1342 | depends on SERIAL_OF_PLATFORM_NWPSERIAL=y | ||
1343 | select SERIAL_CORE_CONSOLE | ||
1344 | help | ||
1345 | Support for console output on the NWP serial ports. | ||
1346 | |||
1330 | config SERIAL_QE | 1347 | config SERIAL_QE |
1331 | tristate "Freescale QUICC Engine serial port support" | 1348 | tristate "Freescale QUICC Engine serial port support" |
1332 | depends on QUICC_ENGINE | 1349 | depends on QUICC_ENGINE |
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index dfe775ac45b2..8844c0a03929 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile | |||
@@ -72,6 +72,7 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o | |||
72 | obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o | 72 | obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o |
73 | obj-$(CONFIG_SERIAL_NETX) += netx-serial.o | 73 | obj-$(CONFIG_SERIAL_NETX) += netx-serial.o |
74 | obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o | 74 | obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o |
75 | obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o | ||
75 | obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o | 76 | obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o |
76 | obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o | 77 | obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o |
77 | obj-$(CONFIG_SERIAL_QE) += ucc_uart.o | 78 | obj-$(CONFIG_SERIAL_QE) += ucc_uart.o |
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c new file mode 100644 index 000000000000..32f3eaf0d262 --- /dev/null +++ b/drivers/serial/nwpserial.c | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * Serial Port driver for a NWP uart device | ||
3 | * | ||
4 | * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/console.h> | ||
14 | #include <linux/serial.h> | ||
15 | #include <linux/serial_reg.h> | ||
16 | #include <linux/serial_core.h> | ||
17 | #include <linux/tty.h> | ||
18 | #include <linux/irqreturn.h> | ||
19 | #include <linux/mutex.h> | ||
20 | #include <linux/of_platform.h> | ||
21 | #include <linux/of_device.h> | ||
22 | #include <linux/nwpserial.h> | ||
23 | #include <asm/prom.h> | ||
24 | #include <asm/dcr.h> | ||
25 | |||
26 | #define NWPSERIAL_NR 2 | ||
27 | |||
28 | #define NWPSERIAL_STATUS_RXVALID 0x1 | ||
29 | #define NWPSERIAL_STATUS_TXFULL 0x2 | ||
30 | |||
31 | struct nwpserial_port { | ||
32 | struct uart_port port; | ||
33 | dcr_host_t dcr_host; | ||
34 | unsigned int ier; | ||
35 | unsigned int mcr; | ||
36 | }; | ||
37 | |||
38 | static DEFINE_MUTEX(nwpserial_mutex); | ||
39 | static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR]; | ||
40 | |||
41 | static void wait_for_bits(struct nwpserial_port *up, int bits) | ||
42 | { | ||
43 | unsigned int status, tmout = 10000; | ||
44 | |||
45 | /* Wait up to 10ms for the character(s) to be sent. */ | ||
46 | do { | ||
47 | status = dcr_read(up->dcr_host, UART_LSR); | ||
48 | |||
49 | if (--tmout == 0) | ||
50 | break; | ||
51 | udelay(1); | ||
52 | } while ((status & bits) != bits); | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE | ||
56 | static void nwpserial_console_putchar(struct uart_port *port, int c) | ||
57 | { | ||
58 | struct nwpserial_port *up; | ||
59 | up = container_of(port, struct nwpserial_port, port); | ||
60 | /* check if tx buffer is full */ | ||
61 | wait_for_bits(up, UART_LSR_THRE); | ||
62 | dcr_write(up->dcr_host, UART_TX, c); | ||
63 | up->port.icount.tx++; | ||
64 | } | ||
65 | |||
66 | static void | ||
67 | nwpserial_console_write(struct console *co, const char *s, unsigned int count) | ||
68 | { | ||
69 | struct nwpserial_port *up = &nwpserial_ports[co->index]; | ||
70 | unsigned long flags; | ||
71 | int locked = 1; | ||
72 | |||
73 | if (oops_in_progress) | ||
74 | locked = spin_trylock_irqsave(&up->port.lock, flags); | ||
75 | else | ||
76 | spin_lock_irqsave(&up->port.lock, flags); | ||
77 | |||
78 | /* save and disable interrupt */ | ||
79 | up->ier = dcr_read(up->dcr_host, UART_IER); | ||
80 | dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI); | ||
81 | |||
82 | uart_console_write(&up->port, s, count, nwpserial_console_putchar); | ||
83 | |||
84 | /* wait for transmitter to become empty */ | ||
85 | while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0) | ||
86 | cpu_relax(); | ||
87 | |||
88 | /* restore interrupt state */ | ||
89 | dcr_write(up->dcr_host, UART_IER, up->ier); | ||
90 | |||
91 | if (locked) | ||
92 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
93 | } | ||
94 | |||
95 | static struct uart_driver nwpserial_reg; | ||
96 | static struct console nwpserial_console = { | ||
97 | .name = "ttySQ", | ||
98 | .write = nwpserial_console_write, | ||
99 | .device = uart_console_device, | ||
100 | .flags = CON_PRINTBUFFER, | ||
101 | .index = -1, | ||
102 | .data = &nwpserial_reg, | ||
103 | }; | ||
104 | #define NWPSERIAL_CONSOLE (&nwpserial_console) | ||
105 | #else | ||
106 | #define NWPSERIAL_CONSOLE NULL | ||
107 | #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ | ||
108 | |||
109 | /**************************************************************************/ | ||
110 | |||
111 | static int nwpserial_request_port(struct uart_port *port) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static void nwpserial_release_port(struct uart_port *port) | ||
117 | { | ||
118 | /* N/A */ | ||
119 | } | ||
120 | |||
121 | static void nwpserial_config_port(struct uart_port *port, int flags) | ||
122 | { | ||
123 | port->type = PORT_NWPSERIAL; | ||
124 | } | ||
125 | |||
126 | static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) | ||
127 | { | ||
128 | struct nwpserial_port *up = dev_id; | ||
129 | struct tty_struct *tty = up->port.info->port.tty; | ||
130 | irqreturn_t ret; | ||
131 | unsigned int iir; | ||
132 | unsigned char ch; | ||
133 | |||
134 | spin_lock(&up->port.lock); | ||
135 | |||
136 | /* check if the uart was the interrupt source. */ | ||
137 | iir = dcr_read(up->dcr_host, UART_IIR); | ||
138 | if (!iir) { | ||
139 | ret = IRQ_NONE; | ||
140 | goto out; | ||
141 | } | ||
142 | |||
143 | do { | ||
144 | up->port.icount.rx++; | ||
145 | ch = dcr_read(up->dcr_host, UART_RX); | ||
146 | if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) | ||
147 | tty_insert_flip_char(tty, ch, TTY_NORMAL); | ||
148 | } while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR); | ||
149 | |||
150 | tty_flip_buffer_push(tty); | ||
151 | ret = IRQ_HANDLED; | ||
152 | |||
153 | out: | ||
154 | spin_unlock(&up->port.lock); | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | static int nwpserial_startup(struct uart_port *port) | ||
159 | { | ||
160 | struct nwpserial_port *up; | ||
161 | int err; | ||
162 | |||
163 | up = container_of(port, struct nwpserial_port, port); | ||
164 | |||
165 | /* disable flow control by default */ | ||
166 | up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE; | ||
167 | dcr_write(up->dcr_host, UART_MCR, up->mcr); | ||
168 | |||
169 | /* register interrupt handler */ | ||
170 | err = request_irq(up->port.irq, nwpserial_interrupt, | ||
171 | IRQF_SHARED, "nwpserial", up); | ||
172 | if (err) | ||
173 | return err; | ||
174 | |||
175 | /* enable interrupts */ | ||
176 | up->ier = UART_IER_RDI; | ||
177 | dcr_write(up->dcr_host, UART_IER, up->ier); | ||
178 | |||
179 | /* enable receiving */ | ||
180 | up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID; | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static void nwpserial_shutdown(struct uart_port *port) | ||
186 | { | ||
187 | struct nwpserial_port *up; | ||
188 | up = container_of(port, struct nwpserial_port, port); | ||
189 | |||
190 | /* disable receiving */ | ||
191 | up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; | ||
192 | |||
193 | /* disable interrupts from this port */ | ||
194 | up->ier = 0; | ||
195 | dcr_write(up->dcr_host, UART_IER, up->ier); | ||
196 | |||
197 | /* free irq */ | ||
198 | free_irq(up->port.irq, port); | ||
199 | } | ||
200 | |||
201 | static int nwpserial_verify_port(struct uart_port *port, | ||
202 | struct serial_struct *ser) | ||
203 | { | ||
204 | return -EINVAL; | ||
205 | } | ||
206 | |||
207 | static const char *nwpserial_type(struct uart_port *port) | ||
208 | { | ||
209 | return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL; | ||
210 | } | ||
211 | |||
212 | static void nwpserial_set_termios(struct uart_port *port, | ||
213 | struct ktermios *termios, struct ktermios *old) | ||
214 | { | ||
215 | struct nwpserial_port *up; | ||
216 | up = container_of(port, struct nwpserial_port, port); | ||
217 | |||
218 | up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID | ||
219 | | NWPSERIAL_STATUS_TXFULL; | ||
220 | |||
221 | up->port.ignore_status_mask = 0; | ||
222 | /* ignore all characters if CREAD is not set */ | ||
223 | if ((termios->c_cflag & CREAD) == 0) | ||
224 | up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; | ||
225 | |||
226 | /* Copy back the old hardware settings */ | ||
227 | if (old) | ||
228 | tty_termios_copy_hw(termios, old); | ||
229 | } | ||
230 | |||
231 | static void nwpserial_break_ctl(struct uart_port *port, int ctl) | ||
232 | { | ||
233 | /* N/A */ | ||
234 | } | ||
235 | |||
236 | static void nwpserial_enable_ms(struct uart_port *port) | ||
237 | { | ||
238 | /* N/A */ | ||
239 | } | ||
240 | |||
241 | static void nwpserial_stop_rx(struct uart_port *port) | ||
242 | { | ||
243 | struct nwpserial_port *up; | ||
244 | up = container_of(port, struct nwpserial_port, port); | ||
245 | /* don't forward any more data (like !CREAD) */ | ||
246 | up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID; | ||
247 | } | ||
248 | |||
249 | static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c) | ||
250 | { | ||
251 | /* check if tx buffer is full */ | ||
252 | wait_for_bits(up, UART_LSR_THRE); | ||
253 | dcr_write(up->dcr_host, UART_TX, c); | ||
254 | up->port.icount.tx++; | ||
255 | } | ||
256 | |||
257 | static void nwpserial_start_tx(struct uart_port *port) | ||
258 | { | ||
259 | struct nwpserial_port *up; | ||
260 | struct circ_buf *xmit; | ||
261 | up = container_of(port, struct nwpserial_port, port); | ||
262 | xmit = &up->port.info->xmit; | ||
263 | |||
264 | if (port->x_char) { | ||
265 | nwpserial_putchar(up, up->port.x_char); | ||
266 | port->x_char = 0; | ||
267 | } | ||
268 | |||
269 | while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) { | ||
270 | nwpserial_putchar(up, xmit->buf[xmit->tail]); | ||
271 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static unsigned int nwpserial_get_mctrl(struct uart_port *port) | ||
276 | { | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl) | ||
281 | { | ||
282 | /* N/A */ | ||
283 | } | ||
284 | |||
285 | static void nwpserial_stop_tx(struct uart_port *port) | ||
286 | { | ||
287 | /* N/A */ | ||
288 | } | ||
289 | |||
290 | static unsigned int nwpserial_tx_empty(struct uart_port *port) | ||
291 | { | ||
292 | struct nwpserial_port *up; | ||
293 | unsigned long flags; | ||
294 | int ret; | ||
295 | up = container_of(port, struct nwpserial_port, port); | ||
296 | |||
297 | spin_lock_irqsave(&up->port.lock, flags); | ||
298 | ret = dcr_read(up->dcr_host, UART_LSR); | ||
299 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
300 | |||
301 | return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0; | ||
302 | } | ||
303 | |||
304 | static struct uart_ops nwpserial_pops = { | ||
305 | .tx_empty = nwpserial_tx_empty, | ||
306 | .set_mctrl = nwpserial_set_mctrl, | ||
307 | .get_mctrl = nwpserial_get_mctrl, | ||
308 | .stop_tx = nwpserial_stop_tx, | ||
309 | .start_tx = nwpserial_start_tx, | ||
310 | .stop_rx = nwpserial_stop_rx, | ||
311 | .enable_ms = nwpserial_enable_ms, | ||
312 | .break_ctl = nwpserial_break_ctl, | ||
313 | .startup = nwpserial_startup, | ||
314 | .shutdown = nwpserial_shutdown, | ||
315 | .set_termios = nwpserial_set_termios, | ||
316 | .type = nwpserial_type, | ||
317 | .release_port = nwpserial_release_port, | ||
318 | .request_port = nwpserial_request_port, | ||
319 | .config_port = nwpserial_config_port, | ||
320 | .verify_port = nwpserial_verify_port, | ||
321 | }; | ||
322 | |||
323 | static struct uart_driver nwpserial_reg = { | ||
324 | .owner = THIS_MODULE, | ||
325 | .driver_name = "nwpserial", | ||
326 | .dev_name = "ttySQ", | ||
327 | .major = TTY_MAJOR, | ||
328 | .minor = 68, | ||
329 | .nr = NWPSERIAL_NR, | ||
330 | .cons = NWPSERIAL_CONSOLE, | ||
331 | }; | ||
332 | |||
333 | int nwpserial_register_port(struct uart_port *port) | ||
334 | { | ||
335 | struct nwpserial_port *up = NULL; | ||
336 | int ret = -1; | ||
337 | int i; | ||
338 | static int first = 1; | ||
339 | int dcr_len; | ||
340 | int dcr_base; | ||
341 | struct device_node *dn; | ||
342 | |||
343 | mutex_lock(&nwpserial_mutex); | ||
344 | |||
345 | dn = to_of_device(port->dev)->node; | ||
346 | if (dn == NULL) | ||
347 | goto out; | ||
348 | |||
349 | /* get dcr base. */ | ||
350 | dcr_base = dcr_resource_start(dn, 0); | ||
351 | |||
352 | /* find matching entry */ | ||
353 | for (i = 0; i < NWPSERIAL_NR; i++) | ||
354 | if (nwpserial_ports[i].port.iobase == dcr_base) { | ||
355 | up = &nwpserial_ports[i]; | ||
356 | break; | ||
357 | } | ||
358 | |||
359 | /* we didn't find a matching entry, search for a free port */ | ||
360 | if (up == NULL) | ||
361 | for (i = 0; i < NWPSERIAL_NR; i++) | ||
362 | if (nwpserial_ports[i].port.type == PORT_UNKNOWN && | ||
363 | nwpserial_ports[i].port.iobase == 0) { | ||
364 | up = &nwpserial_ports[i]; | ||
365 | break; | ||
366 | } | ||
367 | |||
368 | if (up == NULL) { | ||
369 | ret = -EBUSY; | ||
370 | goto out; | ||
371 | } | ||
372 | |||
373 | if (first) | ||
374 | uart_register_driver(&nwpserial_reg); | ||
375 | first = 0; | ||
376 | |||
377 | up->port.membase = port->membase; | ||
378 | up->port.irq = port->irq; | ||
379 | up->port.uartclk = port->uartclk; | ||
380 | up->port.fifosize = port->fifosize; | ||
381 | up->port.regshift = port->regshift; | ||
382 | up->port.iotype = port->iotype; | ||
383 | up->port.flags = port->flags; | ||
384 | up->port.mapbase = port->mapbase; | ||
385 | up->port.private_data = port->private_data; | ||
386 | |||
387 | if (port->dev) | ||
388 | up->port.dev = port->dev; | ||
389 | |||
390 | if (up->port.iobase != dcr_base) { | ||
391 | up->port.ops = &nwpserial_pops; | ||
392 | up->port.fifosize = 16; | ||
393 | |||
394 | spin_lock_init(&up->port.lock); | ||
395 | |||
396 | up->port.iobase = dcr_base; | ||
397 | dcr_len = dcr_resource_len(dn, 0); | ||
398 | |||
399 | up->dcr_host = dcr_map(dn, dcr_base, dcr_len); | ||
400 | if (!DCR_MAP_OK(up->dcr_host)) { | ||
401 | printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL\n"); | ||
402 | goto out; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | ret = uart_add_one_port(&nwpserial_reg, &up->port); | ||
407 | if (ret == 0) | ||
408 | ret = up->port.line; | ||
409 | |||
410 | out: | ||
411 | mutex_unlock(&nwpserial_mutex); | ||
412 | |||
413 | return ret; | ||
414 | } | ||
415 | EXPORT_SYMBOL(nwpserial_register_port); | ||
416 | |||
417 | void nwpserial_unregister_port(int line) | ||
418 | { | ||
419 | struct nwpserial_port *up = &nwpserial_ports[line]; | ||
420 | mutex_lock(&nwpserial_mutex); | ||
421 | uart_remove_one_port(&nwpserial_reg, &up->port); | ||
422 | |||
423 | up->port.type = PORT_UNKNOWN; | ||
424 | |||
425 | mutex_unlock(&nwpserial_mutex); | ||
426 | } | ||
427 | EXPORT_SYMBOL(nwpserial_unregister_port); | ||
428 | |||
429 | #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE | ||
430 | static int __init nwpserial_console_init(void) | ||
431 | { | ||
432 | struct nwpserial_port *up = NULL; | ||
433 | struct device_node *dn; | ||
434 | const char *name; | ||
435 | int dcr_base; | ||
436 | int dcr_len; | ||
437 | int i; | ||
438 | |||
439 | /* search for a free port */ | ||
440 | for (i = 0; i < NWPSERIAL_NR; i++) | ||
441 | if (nwpserial_ports[i].port.type == PORT_UNKNOWN) { | ||
442 | up = &nwpserial_ports[i]; | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | if (up == NULL) | ||
447 | return -1; | ||
448 | |||
449 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | ||
450 | if (name == NULL) | ||
451 | return -1; | ||
452 | |||
453 | dn = of_find_node_by_path(name); | ||
454 | if (!dn) | ||
455 | return -1; | ||
456 | |||
457 | spin_lock_init(&up->port.lock); | ||
458 | up->port.ops = &nwpserial_pops; | ||
459 | up->port.type = PORT_NWPSERIAL; | ||
460 | up->port.fifosize = 16; | ||
461 | |||
462 | dcr_base = dcr_resource_start(dn, 0); | ||
463 | dcr_len = dcr_resource_len(dn, 0); | ||
464 | up->port.iobase = dcr_base; | ||
465 | |||
466 | up->dcr_host = dcr_map(dn, dcr_base, dcr_len); | ||
467 | if (!DCR_MAP_OK(up->dcr_host)) { | ||
468 | printk(KERN_ERR "Cannot map DCR resources for SERIAL\n"); | ||
469 | return -1; | ||
470 | } | ||
471 | register_console(&nwpserial_console); | ||
472 | return 0; | ||
473 | } | ||
474 | console_initcall(nwpserial_console_init); | ||
475 | #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ | ||
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 8fa0ff561e9f..a821e3a3d664 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/serial_core.h> | 14 | #include <linux/serial_core.h> |
15 | #include <linux/serial_8250.h> | 15 | #include <linux/serial_8250.h> |
16 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
17 | #include <linux/nwpserial.h> | ||
17 | 18 | ||
18 | #include <asm/prom.h> | 19 | #include <asm/prom.h> |
19 | 20 | ||
@@ -99,9 +100,16 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev, | |||
99 | goto out; | 100 | goto out; |
100 | 101 | ||
101 | switch (port_type) { | 102 | switch (port_type) { |
103 | #ifdef CONFIG_SERIAL_8250 | ||
102 | case PORT_8250 ... PORT_MAX_8250: | 104 | case PORT_8250 ... PORT_MAX_8250: |
103 | ret = serial8250_register_port(&port); | 105 | ret = serial8250_register_port(&port); |
104 | break; | 106 | break; |
107 | #endif | ||
108 | #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL | ||
109 | case PORT_NWPSERIAL: | ||
110 | ret = nwpserial_register_port(&port); | ||
111 | break; | ||
112 | #endif | ||
105 | default: | 113 | default: |
106 | /* need to add code for these */ | 114 | /* need to add code for these */ |
107 | case PORT_UNKNOWN: | 115 | case PORT_UNKNOWN: |
@@ -129,9 +137,16 @@ static int of_platform_serial_remove(struct of_device *ofdev) | |||
129 | { | 137 | { |
130 | struct of_serial_info *info = ofdev->dev.driver_data; | 138 | struct of_serial_info *info = ofdev->dev.driver_data; |
131 | switch (info->type) { | 139 | switch (info->type) { |
140 | #ifdef CONFIG_SERIAL_8250 | ||
132 | case PORT_8250 ... PORT_MAX_8250: | 141 | case PORT_8250 ... PORT_MAX_8250: |
133 | serial8250_unregister_port(info->line); | 142 | serial8250_unregister_port(info->line); |
134 | break; | 143 | break; |
144 | #endif | ||
145 | #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL | ||
146 | case PORT_NWPSERIAL: | ||
147 | nwpserial_unregister_port(info->line); | ||
148 | break; | ||
149 | #endif | ||
135 | default: | 150 | default: |
136 | /* need to add code for these */ | 151 | /* need to add code for these */ |
137 | break; | 152 | break; |
@@ -148,6 +163,10 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = { | |||
148 | { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, | 163 | { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, |
149 | { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, | 164 | { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, |
150 | { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, | 165 | { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, |
166 | #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL | ||
167 | { .type = "serial", .compatible = "ibm,qpace-nwp-serial", | ||
168 | .data = (void *)PORT_NWPSERIAL, }, | ||
169 | #endif | ||
151 | { .type = "serial", .data = (void *)PORT_UNKNOWN, }, | 170 | { .type = "serial", .data = (void *)PORT_UNKNOWN, }, |
152 | { /* end of list */ }, | 171 | { /* end of list */ }, |
153 | }; | 172 | }; |
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index 2ac52fd8cc11..4e046fed1380 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/amba/bus.h> | 24 | #include <linux/amba/bus.h> |
25 | #include <linux/amba/clcd.h> | 25 | #include <linux/amba/clcd.h> |
26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
27 | #include <linux/hardirq.h> | ||
27 | 28 | ||
28 | #include <asm/sizes.h> | 29 | #include <asm/sizes.h> |
29 | 30 | ||
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 90616822cd20..96d2f8e4c275 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig | |||
@@ -34,6 +34,12 @@ config W1_MASTER_DS2482 | |||
34 | This driver can also be built as a module. If so, the module | 34 | This driver can also be built as a module. If so, the module |
35 | will be called ds2482. | 35 | will be called ds2482. |
36 | 36 | ||
37 | config W1_MASTER_MXC | ||
38 | tristate "Freescale MXC 1-wire busmaster" | ||
39 | depends on W1 && ARCH_MXC | ||
40 | help | ||
41 | Say Y here to enable MXC 1-wire host | ||
42 | |||
37 | config W1_MASTER_DS1WM | 43 | config W1_MASTER_DS1WM |
38 | tristate "Maxim DS1WM 1-wire busmaster" | 44 | tristate "Maxim DS1WM 1-wire busmaster" |
39 | depends on W1 && ARM && HAVE_CLK | 45 | depends on W1 && ARM && HAVE_CLK |
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile index bc4714a75f3a..c5a3e96fcbab 100644 --- a/drivers/w1/masters/Makefile +++ b/drivers/w1/masters/Makefile | |||
@@ -5,6 +5,8 @@ | |||
5 | obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o | 5 | obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o |
6 | obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o | 6 | obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o |
7 | obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o | 7 | obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o |
8 | obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o | ||
9 | |||
8 | obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o | 10 | obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o |
9 | obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o | 11 | obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o |
10 | obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o | 12 | obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o |
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c new file mode 100644 index 000000000000..b9d74d0b353e --- /dev/null +++ b/drivers/w1/masters/mxc_w1.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * Copyright 2005-2008 Freescale Semiconductor, Inc. All Rights Reserved. | ||
3 | * Copyright 2008 Luotao Fu, kernel@pengutronix.de | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/io.h> | ||
26 | |||
27 | #include "../w1.h" | ||
28 | #include "../w1_int.h" | ||
29 | #include "../w1_log.h" | ||
30 | |||
31 | /* According to the mx27 datasheet the reset procedure should take up to about | ||
32 | * 1350us. We set the timeout to 500*100us = 50ms to be safe. */ | ||
33 | #define MXC_W1_RESET_TIMEOUT 500 | ||
34 | |||
35 | /* | ||
36 | * MXC W1 Register offsets | ||
37 | */ | ||
38 | #define MXC_W1_CONTROL 0x00 | ||
39 | #define MXC_W1_TIME_DIVIDER 0x02 | ||
40 | #define MXC_W1_RESET 0x04 | ||
41 | #define MXC_W1_COMMAND 0x06 | ||
42 | #define MXC_W1_TXRX 0x08 | ||
43 | #define MXC_W1_INTERRUPT 0x0A | ||
44 | #define MXC_W1_INTERRUPT_EN 0x0C | ||
45 | |||
46 | struct mxc_w1_device { | ||
47 | void __iomem *regs; | ||
48 | unsigned int clkdiv; | ||
49 | struct clk *clk; | ||
50 | struct w1_bus_master bus_master; | ||
51 | }; | ||
52 | |||
53 | /* | ||
54 | * this is the low level routine to | ||
55 | * reset the device on the One Wire interface | ||
56 | * on the hardware | ||
57 | */ | ||
58 | static u8 mxc_w1_ds2_reset_bus(void *data) | ||
59 | { | ||
60 | u8 reg_val; | ||
61 | unsigned int timeout_cnt = 0; | ||
62 | struct mxc_w1_device *dev = data; | ||
63 | |||
64 | __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL)); | ||
65 | |||
66 | while (1) { | ||
67 | reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL); | ||
68 | |||
69 | if (((reg_val >> 7) & 0x1) == 0 || | ||
70 | timeout_cnt > MXC_W1_RESET_TIMEOUT) | ||
71 | break; | ||
72 | else | ||
73 | timeout_cnt++; | ||
74 | |||
75 | udelay(100); | ||
76 | } | ||
77 | return (reg_val >> 7) & 0x1; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * this is the low level routine to read/write a bit on the One Wire | ||
82 | * interface on the hardware. It writes a 0 if the bit parameter is 0, | ||
83 | * otherwise it performs a write-1/read cycle. | ||
84 | */ | ||
85 | static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) | ||
86 | { | ||
87 | struct mxc_w1_device *mdev = data; | ||
88 | void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL; | ||
89 | unsigned int timeout_cnt = 400; /* Takes max. 120us according to | ||
90 | * datasheet. | ||
91 | */ | ||
92 | |||
93 | __raw_writeb((1 << (5 - bit)), ctrl_addr); | ||
94 | |||
95 | while (timeout_cnt--) { | ||
96 | if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1)) | ||
97 | break; | ||
98 | |||
99 | udelay(1); | ||
100 | } | ||
101 | |||
102 | return ((__raw_readb(ctrl_addr)) >> 3) & 0x1; | ||
103 | } | ||
104 | |||
105 | static int __init mxc_w1_probe(struct platform_device *pdev) | ||
106 | { | ||
107 | struct mxc_w1_device *mdev; | ||
108 | struct resource *res; | ||
109 | int err = 0; | ||
110 | |||
111 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
112 | if (!res) | ||
113 | return -ENODEV; | ||
114 | |||
115 | mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL); | ||
116 | if (!mdev) | ||
117 | return -ENOMEM; | ||
118 | |||
119 | mdev->clk = clk_get(&pdev->dev, "owire_clk"); | ||
120 | if (!mdev->clk) { | ||
121 | err = -ENODEV; | ||
122 | goto failed_clk; | ||
123 | } | ||
124 | |||
125 | mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1; | ||
126 | |||
127 | res = request_mem_region(res->start, resource_size(res), | ||
128 | "mxc_w1"); | ||
129 | if (!res) { | ||
130 | err = -EBUSY; | ||
131 | goto failed_req; | ||
132 | } | ||
133 | |||
134 | mdev->regs = ioremap(res->start, resource_size(res)); | ||
135 | if (!mdev->regs) { | ||
136 | printk(KERN_ERR "Cannot map mxc_w1 registers\n"); | ||
137 | goto failed_ioremap; | ||
138 | } | ||
139 | |||
140 | clk_enable(mdev->clk); | ||
141 | __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER); | ||
142 | |||
143 | mdev->bus_master.data = mdev; | ||
144 | mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus; | ||
145 | mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit; | ||
146 | |||
147 | err = w1_add_master_device(&mdev->bus_master); | ||
148 | |||
149 | if (err) | ||
150 | goto failed_add; | ||
151 | |||
152 | platform_set_drvdata(pdev, mdev); | ||
153 | return 0; | ||
154 | |||
155 | failed_add: | ||
156 | iounmap(mdev->regs); | ||
157 | failed_ioremap: | ||
158 | release_mem_region(res->start, resource_size(res)); | ||
159 | failed_req: | ||
160 | clk_put(mdev->clk); | ||
161 | failed_clk: | ||
162 | kfree(mdev); | ||
163 | return err; | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * disassociate the w1 device from the driver | ||
168 | */ | ||
169 | static int mxc_w1_remove(struct platform_device *pdev) | ||
170 | { | ||
171 | struct mxc_w1_device *mdev = platform_get_drvdata(pdev); | ||
172 | struct resource *res; | ||
173 | |||
174 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
175 | |||
176 | w1_remove_master_device(&mdev->bus_master); | ||
177 | |||
178 | iounmap(mdev->regs); | ||
179 | release_mem_region(res->start, resource_size(res)); | ||
180 | clk_disable(mdev->clk); | ||
181 | clk_put(mdev->clk); | ||
182 | |||
183 | platform_set_drvdata(pdev, NULL); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static struct platform_driver mxc_w1_driver = { | ||
189 | .driver = { | ||
190 | .name = "mxc_w1", | ||
191 | }, | ||
192 | .probe = mxc_w1_probe, | ||
193 | .remove = mxc_w1_remove, | ||
194 | }; | ||
195 | |||
196 | static int __init mxc_w1_init(void) | ||
197 | { | ||
198 | return platform_driver_register(&mxc_w1_driver); | ||
199 | } | ||
200 | |||
201 | static void mxc_w1_exit(void) | ||
202 | { | ||
203 | platform_driver_unregister(&mxc_w1_driver); | ||
204 | } | ||
205 | |||
206 | module_init(mxc_w1_init); | ||
207 | module_exit(mxc_w1_exit); | ||
208 | |||
209 | MODULE_LICENSE("GPL"); | ||
210 | MODULE_AUTHOR("Freescale Semiconductors Inc"); | ||
211 | MODULE_DESCRIPTION("Driver for One-Wire on MXC"); | ||
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 97304bd83ec9..d8a9709f3449 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
@@ -210,6 +210,7 @@ u8 w1_read_8(struct w1_master *); | |||
210 | int w1_reset_bus(struct w1_master *); | 210 | int w1_reset_bus(struct w1_master *); |
211 | u8 w1_calc_crc8(u8 *, int); | 211 | u8 w1_calc_crc8(u8 *, int); |
212 | void w1_write_block(struct w1_master *, const u8 *, int); | 212 | void w1_write_block(struct w1_master *, const u8 *, int); |
213 | void w1_touch_block(struct w1_master *, u8 *, int); | ||
213 | u8 w1_read_block(struct w1_master *, u8 *, int); | 214 | u8 w1_read_block(struct w1_master *, u8 *, int); |
214 | int w1_reset_select_slave(struct w1_slave *sl); | 215 | int w1_reset_select_slave(struct w1_slave *sl); |
215 | void w1_next_pullup(struct w1_master *, int); | 216 | void w1_next_pullup(struct w1_master *, int); |
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 5139c25ca962..442bd8bbd4a5 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c | |||
@@ -238,7 +238,6 @@ EXPORT_SYMBOL_GPL(w1_read_8); | |||
238 | * @param dev the master device | 238 | * @param dev the master device |
239 | * @param buf pointer to the data to write | 239 | * @param buf pointer to the data to write |
240 | * @param len the number of bytes to write | 240 | * @param len the number of bytes to write |
241 | * @return the byte read | ||
242 | */ | 241 | */ |
243 | void w1_write_block(struct w1_master *dev, const u8 *buf, int len) | 242 | void w1_write_block(struct w1_master *dev, const u8 *buf, int len) |
244 | { | 243 | { |
@@ -256,6 +255,31 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len) | |||
256 | EXPORT_SYMBOL_GPL(w1_write_block); | 255 | EXPORT_SYMBOL_GPL(w1_write_block); |
257 | 256 | ||
258 | /** | 257 | /** |
258 | * Touches a series of bytes. | ||
259 | * | ||
260 | * @param dev the master device | ||
261 | * @param buf pointer to the data to write | ||
262 | * @param len the number of bytes to write | ||
263 | */ | ||
264 | void w1_touch_block(struct w1_master *dev, u8 *buf, int len) | ||
265 | { | ||
266 | int i, j; | ||
267 | u8 tmp; | ||
268 | |||
269 | for (i = 0; i < len; ++i) { | ||
270 | tmp = 0; | ||
271 | for (j = 0; j < 8; ++j) { | ||
272 | if (j == 7) | ||
273 | w1_pre_write(dev); | ||
274 | tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j; | ||
275 | } | ||
276 | |||
277 | buf[i] = tmp; | ||
278 | } | ||
279 | } | ||
280 | EXPORT_SYMBOL_GPL(w1_touch_block); | ||
281 | |||
282 | /** | ||
259 | * Reads a series of bytes. | 283 | * Reads a series of bytes. |
260 | * | 284 | * |
261 | * @param dev the master device | 285 | * @param dev the master device |
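w1_touch_block() above shifts each byte of the caller's buffer onto the wire and overwrites it with the bits the slave drives back, which is what the W1_CMD_TOUCH handling in the netlink patch below builds on. A small kernel-side sketch, assuming an already-discovered slave; the function name and error choice are illustrative, not part of the patch:

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include "w1.h"

    /* Illustrative: send a command buffer and collect the reply in place. */
    static int example_touch_exchange(struct w1_slave *sl, u8 *buf, int len)
    {
        struct w1_master *dev = sl->master;
        int err = 0;

        mutex_lock(&dev->mutex);
        if (w1_reset_select_slave(sl))          /* returns 0 on success */
            err = -ENODEV;
        else
            w1_touch_block(dev, buf, len);      /* buf now holds the reply bits */
        mutex_unlock(&dev->mutex);

        return err;
    }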
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 65c5ebd0787e..fdf72851c574 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c | |||
@@ -47,21 +47,56 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) | |||
47 | cn_netlink_send(m, 0, GFP_KERNEL); | 47 | cn_netlink_send(m, 0, GFP_KERNEL); |
48 | } | 48 | } |
49 | 49 | ||
50 | static int w1_process_command_master(struct w1_master *dev, struct cn_msg *msg, | 50 | static void w1_send_slave(struct w1_master *dev, u64 rn) |
51 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | 51 | { |
52 | struct cn_msg *msg = dev->priv; | ||
53 | struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); | ||
54 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); | ||
55 | int avail; | ||
56 | |||
57 | avail = dev->priv_size - cmd->len; | ||
58 | |||
59 | if (avail > 8) { | ||
60 | u64 *data = (void *)(cmd + 1) + cmd->len; | ||
61 | |||
62 | *data = rn; | ||
63 | cmd->len += 8; | ||
64 | hdr->len += 8; | ||
65 | msg->len += 8; | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | msg->ack++; | ||
70 | cn_netlink_send(msg, 0, GFP_KERNEL); | ||
71 | |||
72 | msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); | ||
73 | hdr->len = sizeof(struct w1_netlink_cmd); | ||
74 | cmd->len = 0; | ||
75 | } | ||
76 | |||
77 | static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg, | ||
78 | unsigned int avail) | ||
52 | { | 79 | { |
53 | dev_dbg(&dev->dev, "%s: %s: cmd=%02x, len=%u.\n", | 80 | struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); |
54 | __func__, dev->name, cmd->cmd, cmd->len); | 81 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); |
82 | int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH; | ||
55 | 83 | ||
56 | if (cmd->cmd != W1_CMD_SEARCH && cmd->cmd != W1_CMD_ALARM_SEARCH) | 84 | dev->priv = msg; |
57 | return -EINVAL; | 85 | dev->priv_size = avail; |
86 | |||
87 | w1_search_devices(dev, search_type, w1_send_slave); | ||
88 | |||
89 | msg->ack = 0; | ||
90 | cn_netlink_send(msg, 0, GFP_KERNEL); | ||
91 | |||
92 | dev->priv = NULL; | ||
93 | dev->priv_size = 0; | ||
58 | 94 | ||
59 | w1_search_process(dev, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); | ||
60 | return 0; | 95 | return 0; |
61 | } | 96 | } |
62 | 97 | ||
63 | static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, | 98 | static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr, |
64 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | 99 | struct w1_netlink_cmd *cmd) |
65 | { | 100 | { |
66 | void *data; | 101 | void *data; |
67 | struct w1_netlink_msg *h; | 102 | struct w1_netlink_msg *h; |
@@ -85,7 +120,8 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, | |||
85 | memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); | 120 | memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); |
86 | 121 | ||
87 | cm->ack = msg->seq+1; | 122 | cm->ack = msg->seq+1; |
88 | cm->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len; | 123 | cm->len = sizeof(struct w1_netlink_msg) + |
124 | sizeof(struct w1_netlink_cmd) + cmd->len; | ||
89 | 125 | ||
90 | h->len = sizeof(struct w1_netlink_cmd) + cmd->len; | 126 | h->len = sizeof(struct w1_netlink_cmd) + cmd->len; |
91 | 127 | ||
@@ -98,36 +134,178 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, | |||
98 | return err; | 134 | return err; |
99 | } | 135 | } |
100 | 136 | ||
101 | static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, | 137 | static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg, |
102 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | 138 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) |
103 | { | 139 | { |
104 | int err = 0; | 140 | int err = 0; |
105 | 141 | ||
106 | dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", | 142 | switch (cmd->cmd) { |
107 | __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, sl->reg_num.crc, | 143 | case W1_CMD_TOUCH: |
108 | cmd->cmd, cmd->len); | 144 | w1_touch_block(dev, cmd->data, cmd->len); |
145 | w1_send_read_reply(msg, hdr, cmd); | ||
146 | break; | ||
147 | case W1_CMD_READ: | ||
148 | w1_read_block(dev, cmd->data, cmd->len); | ||
149 | w1_send_read_reply(msg, hdr, cmd); | ||
150 | break; | ||
151 | case W1_CMD_WRITE: | ||
152 | w1_write_block(dev, cmd->data, cmd->len); | ||
153 | break; | ||
154 | default: | ||
155 | err = -EINVAL; | ||
156 | break; | ||
157 | } | ||
158 | |||
159 | return err; | ||
160 | } | ||
161 | |||
162 | static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg, | ||
163 | struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd) | ||
164 | { | ||
165 | int err = -EINVAL; | ||
166 | struct cn_msg *msg; | ||
167 | struct w1_netlink_msg *hdr; | ||
168 | struct w1_netlink_cmd *cmd; | ||
169 | |||
170 | msg = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
171 | if (!msg) | ||
172 | return -ENOMEM; | ||
173 | |||
174 | msg->id = req_msg->id; | ||
175 | msg->seq = req_msg->seq; | ||
176 | msg->ack = 0; | ||
177 | msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); | ||
178 | |||
179 | hdr = (struct w1_netlink_msg *)(msg + 1); | ||
180 | cmd = (struct w1_netlink_cmd *)(hdr + 1); | ||
181 | |||
182 | hdr->type = W1_MASTER_CMD; | ||
183 | hdr->id = req_hdr->id; | ||
184 | hdr->len = sizeof(struct w1_netlink_cmd); | ||
185 | |||
186 | cmd->cmd = req_cmd->cmd; | ||
187 | cmd->len = 0; | ||
109 | 188 | ||
110 | switch (cmd->cmd) { | 189 | switch (cmd->cmd) { |
111 | case W1_CMD_READ: | 190 | case W1_CMD_SEARCH: |
112 | w1_read_block(sl->master, cmd->data, cmd->len); | 191 | case W1_CMD_ALARM_SEARCH: |
113 | w1_send_read_reply(sl, msg, hdr, cmd); | 192 | err = w1_process_search_command(dev, msg, |
114 | break; | 193 | PAGE_SIZE - msg->len - sizeof(struct cn_msg)); |
115 | case W1_CMD_WRITE: | 194 | break; |
116 | w1_write_block(sl->master, cmd->data, cmd->len); | 195 | case W1_CMD_READ: |
117 | break; | 196 | case W1_CMD_WRITE: |
118 | case W1_CMD_SEARCH: | 197 | case W1_CMD_TOUCH: |
119 | case W1_CMD_ALARM_SEARCH: | 198 | err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd); |
120 | w1_search_process(sl->master, | 199 | break; |
121 | (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); | 200 | case W1_CMD_RESET: |
122 | break; | 201 | err = w1_reset_bus(dev); |
123 | default: | 202 | break; |
124 | err = -1; | 203 | default: |
125 | break; | 204 | err = -EINVAL; |
205 | break; | ||
126 | } | 206 | } |
127 | 207 | ||
208 | kfree(msg); | ||
128 | return err; | 209 | return err; |
129 | } | 210 | } |
130 | 211 | ||
212 | static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, | ||
213 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | ||
214 | { | ||
215 | dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", | ||
216 | __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, | ||
217 | sl->reg_num.crc, cmd->cmd, cmd->len); | ||
218 | |||
219 | return w1_process_command_io(sl->master, msg, hdr, cmd); | ||
220 | } | ||
221 | |||
222 | static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd) | ||
223 | { | ||
224 | struct w1_master *m; | ||
225 | struct cn_msg *cn; | ||
226 | struct w1_netlink_msg *w; | ||
227 | u32 *id; | ||
228 | |||
229 | if (mcmd->type != W1_LIST_MASTERS) { | ||
230 | printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n", | ||
231 | __func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len); | ||
232 | return -EPROTO; | ||
233 | } | ||
234 | |||
235 | cn = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
236 | if (!cn) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | cn->id.idx = CN_W1_IDX; | ||
240 | cn->id.val = CN_W1_VAL; | ||
241 | |||
242 | cn->seq = msg->seq; | ||
243 | cn->ack = 1; | ||
244 | cn->len = sizeof(struct w1_netlink_msg); | ||
245 | w = (struct w1_netlink_msg *)(cn + 1); | ||
246 | |||
247 | w->type = W1_LIST_MASTERS; | ||
248 | w->status = 0; | ||
249 | w->len = 0; | ||
250 | id = (u32 *)(w + 1); | ||
251 | |||
252 | mutex_lock(&w1_mlock); | ||
253 | list_for_each_entry(m, &w1_masters, w1_master_entry) { | ||
254 | if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { | ||
255 | cn_netlink_send(cn, 0, GFP_KERNEL); | ||
256 | cn->ack++; | ||
257 | cn->len = sizeof(struct w1_netlink_msg); | ||
258 | w->len = 0; | ||
259 | id = (u32 *)(w + 1); | ||
260 | } | ||
261 | |||
262 | *id = m->id; | ||
263 | w->len += sizeof(*id); | ||
264 | cn->len += sizeof(*id); | ||
265 | id++; | ||
266 | } | ||
267 | cn->ack = 0; | ||
268 | cn_netlink_send(cn, 0, GFP_KERNEL); | ||
269 | mutex_unlock(&w1_mlock); | ||
270 | |||
271 | kfree(cn); | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg, | ||
276 | struct w1_netlink_cmd *rcmd, int error) | ||
277 | { | ||
278 | struct cn_msg *cmsg; | ||
279 | struct w1_netlink_msg *msg; | ||
280 | struct w1_netlink_cmd *cmd; | ||
281 | |||
282 | cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL); | ||
283 | if (!cmsg) | ||
284 | return -ENOMEM; | ||
285 | |||
286 | msg = (struct w1_netlink_msg *)(cmsg + 1); | ||
287 | cmd = (struct w1_netlink_cmd *)(msg + 1); | ||
288 | |||
289 | memcpy(cmsg, rcmsg, sizeof(*cmsg)); | ||
290 | cmsg->len = sizeof(*msg); | ||
291 | |||
292 | memcpy(msg, rmsg, sizeof(*msg)); | ||
293 | msg->len = 0; | ||
294 | msg->status = (short)-error; | ||
295 | |||
296 | if (rcmd) { | ||
297 | memcpy(cmd, rcmd, sizeof(*cmd)); | ||
298 | cmd->len = 0; | ||
299 | msg->len += sizeof(*cmd); | ||
300 | cmsg->len += sizeof(*cmd); | ||
301 | } | ||
302 | |||
303 | error = cn_netlink_send(cmsg, 0, GFP_KERNEL); | ||
304 | kfree(cmsg); | ||
305 | |||
306 | return error; | ||
307 | } | ||
308 | |||
131 | static void w1_cn_callback(void *data) | 309 | static void w1_cn_callback(void *data) |
132 | { | 310 | { |
133 | struct cn_msg *msg = data; | 311 | struct cn_msg *msg = data; |
@@ -144,6 +322,7 @@ static void w1_cn_callback(void *data) | |||
144 | 322 | ||
145 | dev = NULL; | 323 | dev = NULL; |
146 | sl = NULL; | 324 | sl = NULL; |
325 | cmd = NULL; | ||
147 | 326 | ||
148 | memcpy(&id, m->id.id, sizeof(id)); | 327 | memcpy(&id, m->id.id, sizeof(id)); |
149 | #if 0 | 328 | #if 0 |
@@ -155,15 +334,15 @@ static void w1_cn_callback(void *data) | |||
155 | break; | 334 | break; |
156 | } | 335 | } |
157 | 336 | ||
158 | if (!mlen) | ||
159 | goto out_cont; | ||
160 | |||
161 | if (m->type == W1_MASTER_CMD) { | 337 | if (m->type == W1_MASTER_CMD) { |
162 | dev = w1_search_master_id(m->id.mst.id); | 338 | dev = w1_search_master_id(m->id.mst.id); |
163 | } else if (m->type == W1_SLAVE_CMD) { | 339 | } else if (m->type == W1_SLAVE_CMD) { |
164 | sl = w1_search_slave(&id); | 340 | sl = w1_search_slave(&id); |
165 | if (sl) | 341 | if (sl) |
166 | dev = sl->master; | 342 | dev = sl->master; |
343 | } else { | ||
344 | err = w1_process_command_root(msg, m); | ||
345 | goto out_cont; | ||
167 | } | 346 | } |
168 | 347 | ||
169 | if (!dev) { | 348 | if (!dev) { |
@@ -171,6 +350,10 @@ static void w1_cn_callback(void *data) | |||
171 | goto out_cont; | 350 | goto out_cont; |
172 | } | 351 | } |
173 | 352 | ||
353 | err = 0; | ||
354 | if (!mlen) | ||
355 | goto out_cont; | ||
356 | |||
174 | mutex_lock(&dev->mutex); | 357 | mutex_lock(&dev->mutex); |
175 | 358 | ||
176 | if (sl && w1_reset_select_slave(sl)) { | 359 | if (sl && w1_reset_select_slave(sl)) { |
@@ -187,9 +370,12 @@ static void w1_cn_callback(void *data) | |||
187 | } | 370 | } |
188 | 371 | ||
189 | if (sl) | 372 | if (sl) |
190 | w1_process_command_slave(sl, msg, m, cmd); | 373 | err = w1_process_command_slave(sl, msg, m, cmd); |
191 | else | 374 | else |
192 | w1_process_command_master(dev, msg, m, cmd); | 375 | err = w1_process_command_master(dev, msg, m, cmd); |
376 | |||
377 | w1_netlink_send_error(msg, m, cmd, err); | ||
378 | err = 0; | ||
193 | 379 | ||
194 | cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); | 380 | cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); |
195 | mlen -= cmd->len + sizeof(struct w1_netlink_cmd); | 381 | mlen -= cmd->len + sizeof(struct w1_netlink_cmd); |
@@ -200,6 +386,8 @@ out_up: | |||
200 | atomic_dec(&sl->refcnt); | 386 | atomic_dec(&sl->refcnt); |
201 | mutex_unlock(&dev->mutex); | 387 | mutex_unlock(&dev->mutex); |
202 | out_cont: | 388 | out_cont: |
389 | if (!cmd || err) | ||
390 | w1_netlink_send_error(msg, m, cmd, err); | ||
203 | msg->len -= sizeof(struct w1_netlink_msg) + m->len; | 391 | msg->len -= sizeof(struct w1_netlink_msg) + m->len; |
204 | m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); | 392 | m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); |
205 | 393 | ||
@@ -209,11 +397,6 @@ out_cont: | |||
209 | if (err == -ENODEV) | 397 | if (err == -ENODEV) |
210 | err = 0; | 398 | err = 0; |
211 | } | 399 | } |
212 | #if 0 | ||
213 | if (err) { | ||
214 | printk("%s: malformed message. Dropping.\n", __func__); | ||
215 | } | ||
216 | #endif | ||
217 | } | 400 | } |
218 | 401 | ||
219 | int w1_init_netlink(void) | 402 | int w1_init_netlink(void) |
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 56122b9e9294..27e950f935b1 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h | |||
@@ -34,12 +34,13 @@ enum w1_netlink_message_types { | |||
34 | W1_MASTER_REMOVE, | 34 | W1_MASTER_REMOVE, |
35 | W1_MASTER_CMD, | 35 | W1_MASTER_CMD, |
36 | W1_SLAVE_CMD, | 36 | W1_SLAVE_CMD, |
37 | W1_LIST_MASTERS, | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | struct w1_netlink_msg | 40 | struct w1_netlink_msg |
40 | { | 41 | { |
41 | __u8 type; | 42 | __u8 type; |
42 | __u8 reserved; | 43 | __u8 status; |
43 | __u16 len; | 44 | __u16 len; |
44 | union { | 45 | union { |
45 | __u8 id[8]; | 46 | __u8 id[8]; |
@@ -51,10 +52,15 @@ struct w1_netlink_msg | |||
51 | __u8 data[0]; | 52 | __u8 data[0]; |
52 | }; | 53 | }; |
53 | 54 | ||
54 | #define W1_CMD_READ 0x0 | 55 | enum w1_commands { |
55 | #define W1_CMD_WRITE 0x1 | 56 | W1_CMD_READ = 0, |
56 | #define W1_CMD_SEARCH 0x2 | 57 | W1_CMD_WRITE, |
57 | #define W1_CMD_ALARM_SEARCH 0x3 | 58 | W1_CMD_SEARCH, |
59 | W1_CMD_ALARM_SEARCH, | ||
60 | W1_CMD_TOUCH, | ||
61 | W1_CMD_RESET, | ||
62 | W1_CMD_MAX, | ||
63 | }; | ||
58 | 64 | ||
59 | struct w1_netlink_cmd | 65 | struct w1_netlink_cmd |
60 | { | 66 | { |
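With the command codes now an enum that adds W1_CMD_TOUCH and W1_CMD_RESET, a request aimed at a particular bus master nests a w1_netlink_cmd inside the w1_netlink_msg payload. Here is a layout sketch, assuming struct w1_netlink_cmd keeps its cmd/res/len/data[] shape and that the chosen master id exists; transport over the connector socket is again omitted.

/* Hypothetical sketch: a W1_MASTER_CMD request carrying a single
 * W1_CMD_RESET for the bus master identified by master_id. */
#include <string.h>
#include <linux/connector.h>
#include "w1_netlink.h"            /* assumed userspace copy of the kernel header */

static size_t w1_build_bus_reset(void *buf, size_t buflen, __u32 master_id)
{
        struct cn_msg *cn = buf;
        struct w1_netlink_msg *m = (struct w1_netlink_msg *)(cn + 1);
        struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(m + 1);
        size_t total = sizeof(*cn) + sizeof(*m) + sizeof(*cmd);

        if (buflen < total)
                return 0;

        memset(buf, 0, total);
        cn->id.idx = CN_W1_IDX;
        cn->id.val = CN_W1_VAL;
        cn->len    = sizeof(*m) + sizeof(*cmd);

        m->type      = W1_MASTER_CMD;
        m->id.mst.id = master_id;     /* which master to address */
        m->len       = sizeof(*cmd);  /* one embedded command, no extra data */

        cmd->cmd = W1_CMD_RESET;      /* bus reset needs no payload */
        cmd->len = 0;

        return total;
}

On completion the kernel answers through w1_netlink_send_error(), so the status-decoding sketch shown earlier applies unchanged.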
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 4b75a16de009..526187c8a12d 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -17,3 +17,27 @@ config XEN_SCRUB_PAGES | |||
17 | is not accidentally visible to other domains. It is more | 17 | is not accidentally visible to other domains. It is more |
18 | secure, but slightly less efficient. | 18 | secure, but slightly less efficient. |
19 | If in doubt, say yes. | 19 | If in doubt, say yes. |
20 | |||
21 | config XENFS | ||
22 | tristate "Xen filesystem" | ||
23 | depends on XEN | ||
24 | default y | ||
25 | help | ||
26 | The xen filesystem provides a way for domains to share | ||
27 | information with each other and with the hypervisor. | ||
28 | For example, by reading and writing the "xenbus" file, guests | ||
29 | may pass arbitrary information to the initial domain. | ||
30 | If in doubt, say yes. | ||
31 | |||
32 | config XEN_COMPAT_XENFS | ||
33 | bool "Create compatibility mount point /proc/xen" | ||
34 | depends on XENFS | ||
35 | default y | ||
36 | help | ||
37 | The old xenstore userspace tools expect to find "xenbus" | ||
38 | under /proc/xen, but "xenbus" is now found at the root of the | ||
39 | xenfs filesystem. Selecting this causes the kernel to create | ||
40 | the compatibility mount point /proc/xen if it is running on | ||
41 | a xen platform. | ||
42 | If in doubt, say yes. | ||
43 | |||
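The compatibility directory only gives old xenstore tools a familiar place to look; something (normally an initscript) still has to mount xenfs on it. A minimal sketch with mount(2), assuming the conventional /proc/xen mount point created by XEN_COMPAT_XENFS:

/* Hypothetical sketch: mount xenfs on the compatibility mount point,
 * roughly what "mount -t xenfs xenfs /proc/xen" does from a shell. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("xenfs", "/proc/xen", "xenfs", 0, NULL) != 0) {
                perror("mount xenfs");
                return 1;
        }
        return 0;
}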
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index d2a8fdf0e191..ff8accc9e103 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -1,5 +1,7 @@ | |||
1 | obj-y += grant-table.o features.o events.o manage.o | 1 | obj-y += grant-table.o features.o events.o manage.o |
2 | obj-y += xenbus/ | 2 | obj-y += xenbus/ |
3 | |||
3 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 4 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
4 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 5 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
5 | obj-$(CONFIG_XEN_BALLOON) += balloon.o | 6 | obj-$(CONFIG_XEN_BALLOON) += balloon.o |
7 | obj-$(CONFIG_XENFS) += xenfs/ \ No newline at end of file | ||
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 9678b3e98c63..92a1ef80a288 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); | |||
136 | /** | 136 | /** |
137 | * xenbus_switch_state | 137 | * xenbus_switch_state |
138 | * @dev: xenbus device | 138 | * @dev: xenbus device |
139 | * @xbt: transaction handle | ||
140 | * @state: new state | 139 | * @state: new state |
141 | * | 140 | * |
142 | * Advertise in the store a change of the given driver to the given new_state. | 141 | * Advertise in the store a change of the given driver to the given new_state. |
@@ -267,7 +266,7 @@ EXPORT_SYMBOL_GPL(xenbus_dev_error); | |||
267 | * @fmt: error message format | 266 | * @fmt: error message format |
268 | * | 267 | * |
269 | * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by | 268 | * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by |
270 | * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly | 269 | * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly |
271 | * closedown of this driver and its peer. | 270 | * closedown of this driver and its peer. |
272 | */ | 271 | */ |
273 | 272 | ||
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index b2a03184a246..773d1cf23283 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/ctype.h> | 40 | #include <linux/ctype.h> |
41 | #include <linux/fcntl.h> | 41 | #include <linux/fcntl.h> |
42 | #include <linux/mm.h> | 42 | #include <linux/mm.h> |
43 | #include <linux/proc_fs.h> | ||
43 | #include <linux/notifier.h> | 44 | #include <linux/notifier.h> |
44 | #include <linux/kthread.h> | 45 | #include <linux/kthread.h> |
45 | #include <linux/mutex.h> | 46 | #include <linux/mutex.h> |
@@ -55,7 +56,10 @@ | |||
55 | #include "xenbus_comms.h" | 56 | #include "xenbus_comms.h" |
56 | #include "xenbus_probe.h" | 57 | #include "xenbus_probe.h" |
57 | 58 | ||
59 | |||
58 | int xen_store_evtchn; | 60 | int xen_store_evtchn; |
61 | EXPORT_SYMBOL(xen_store_evtchn); | ||
62 | |||
59 | struct xenstore_domain_interface *xen_store_interface; | 63 | struct xenstore_domain_interface *xen_store_interface; |
60 | static unsigned long xen_store_mfn; | 64 | static unsigned long xen_store_mfn; |
61 | 65 | ||
@@ -166,6 +170,9 @@ static int read_backend_details(struct xenbus_device *xendev) | |||
166 | return read_otherend_details(xendev, "backend-id", "backend"); | 170 | return read_otherend_details(xendev, "backend-id", "backend"); |
167 | } | 171 | } |
168 | 172 | ||
173 | static struct device_attribute xenbus_dev_attrs[] = { | ||
174 | __ATTR_NULL | ||
175 | }; | ||
169 | 176 | ||
170 | /* Bus type for frontend drivers. */ | 177 | /* Bus type for frontend drivers. */ |
171 | static struct xen_bus_type xenbus_frontend = { | 178 | static struct xen_bus_type xenbus_frontend = { |
@@ -174,12 +181,13 @@ static struct xen_bus_type xenbus_frontend = { | |||
174 | .get_bus_id = frontend_bus_id, | 181 | .get_bus_id = frontend_bus_id, |
175 | .probe = xenbus_probe_frontend, | 182 | .probe = xenbus_probe_frontend, |
176 | .bus = { | 183 | .bus = { |
177 | .name = "xen", | 184 | .name = "xen", |
178 | .match = xenbus_match, | 185 | .match = xenbus_match, |
179 | .uevent = xenbus_uevent, | 186 | .uevent = xenbus_uevent, |
180 | .probe = xenbus_dev_probe, | 187 | .probe = xenbus_dev_probe, |
181 | .remove = xenbus_dev_remove, | 188 | .remove = xenbus_dev_remove, |
182 | .shutdown = xenbus_dev_shutdown, | 189 | .shutdown = xenbus_dev_shutdown, |
190 | .dev_attrs = xenbus_dev_attrs, | ||
183 | }, | 191 | }, |
184 | }; | 192 | }; |
185 | 193 | ||
@@ -852,6 +860,14 @@ static int __init xenbus_probe_init(void) | |||
852 | if (!xen_initial_domain()) | 860 | if (!xen_initial_domain()) |
853 | xenbus_probe(NULL); | 861 | xenbus_probe(NULL); |
854 | 862 | ||
863 | #ifdef CONFIG_XEN_COMPAT_XENFS | ||
864 | /* | ||
865 | * Create xenfs mountpoint in /proc for compatibility with | ||
866 | * utilities that expect to find "xenbus" under "/proc/xen". | ||
867 | */ | ||
868 | proc_mkdir("xen", NULL); | ||
869 | #endif | ||
870 | |||
855 | return 0; | 871 | return 0; |
856 | 872 | ||
857 | out_unreg_back: | 873 | out_unreg_back: |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 7f2f91c0e11d..e325eab4724d 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
@@ -184,6 +184,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) | |||
184 | 184 | ||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
187 | EXPORT_SYMBOL(xenbus_dev_request_and_reply); | ||
187 | 188 | ||
188 | /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ | 189 | /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ |
189 | static void *xs_talkv(struct xenbus_transaction t, | 190 | static void *xs_talkv(struct xenbus_transaction t, |
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile new file mode 100644 index 000000000000..25275c3bbdff --- /dev/null +++ b/drivers/xen/xenfs/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_XENFS) += xenfs.o | ||
2 | |||
3 | xenfs-objs = super.o xenbus.o \ No newline at end of file | ||
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c new file mode 100644 index 000000000000..515741a8e6b8 --- /dev/null +++ b/drivers/xen/xenfs/super.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * xenfs.c - a filesystem for passing info between a domain and | ||
3 | * the hypervisor. | ||
4 | * | ||
5 | * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem | ||
6 | * and /proc/xen compatibility mount point. | ||
7 | * Turned xenfs into a loadable module. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/magic.h> | ||
15 | |||
16 | #include "xenfs.h" | ||
17 | |||
18 | #include <asm/xen/hypervisor.h> | ||
19 | |||
20 | MODULE_DESCRIPTION("Xen filesystem"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | static int xenfs_fill_super(struct super_block *sb, void *data, int silent) | ||
24 | { | ||
25 | static struct tree_descr xenfs_files[] = { | ||
26 | [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR}, | ||
27 | {""}, | ||
28 | }; | ||
29 | |||
30 | return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files); | ||
31 | } | ||
32 | |||
33 | static int xenfs_get_sb(struct file_system_type *fs_type, | ||
34 | int flags, const char *dev_name, | ||
35 | void *data, struct vfsmount *mnt) | ||
36 | { | ||
37 | return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt); | ||
38 | } | ||
39 | |||
40 | static struct file_system_type xenfs_type = { | ||
41 | .owner = THIS_MODULE, | ||
42 | .name = "xenfs", | ||
43 | .get_sb = xenfs_get_sb, | ||
44 | .kill_sb = kill_litter_super, | ||
45 | }; | ||
46 | |||
47 | static int __init xenfs_init(void) | ||
48 | { | ||
49 | if (xen_pv_domain()) | ||
50 | return register_filesystem(&xenfs_type); | ||
51 | |||
52 | printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static void __exit xenfs_exit(void) | ||
57 | { | ||
58 | if (xen_pv_domain()) | ||
59 | unregister_filesystem(&xenfs_type); | ||
60 | } | ||
61 | |||
62 | module_init(xenfs_init); | ||
63 | module_exit(xenfs_exit); | ||
64 | |||
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c new file mode 100644 index 000000000000..875a4c59c594 --- /dev/null +++ b/drivers/xen/xenfs/xenbus.c | |||
@@ -0,0 +1,593 @@ | |||
1 | /* | ||
2 | * Driver giving user-space access to the kernel's xenbus connection | ||
3 | * to xenstore. | ||
4 | * | ||
5 | * Copyright (c) 2005, Christian Limpach | ||
6 | * Copyright (c) 2005, Rusty Russell, IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | * | ||
32 | * Changes: | ||
33 | * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem | ||
34 | * and /proc/xen compatibility mount point. | ||
35 | * Turned xenfs into a loadable module. | ||
36 | */ | ||
37 | |||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/errno.h> | ||
40 | #include <linux/uio.h> | ||
41 | #include <linux/notifier.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/fs.h> | ||
44 | #include <linux/poll.h> | ||
45 | #include <linux/mutex.h> | ||
46 | #include <linux/spinlock.h> | ||
47 | #include <linux/mount.h> | ||
48 | #include <linux/pagemap.h> | ||
49 | #include <linux/uaccess.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/namei.h> | ||
52 | #include <linux/string.h> | ||
53 | |||
54 | #include "xenfs.h" | ||
55 | #include "../xenbus/xenbus_comms.h" | ||
56 | |||
57 | #include <xen/xenbus.h> | ||
58 | #include <asm/xen/hypervisor.h> | ||
59 | |||
60 | /* | ||
61 | * An element of a list of outstanding transactions, for which we're | ||
62 | * still awaiting a reply. | ||
63 | */ | ||
64 | struct xenbus_transaction_holder { | ||
65 | struct list_head list; | ||
66 | struct xenbus_transaction handle; | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * A buffer of data on the queue. | ||
71 | */ | ||
72 | struct read_buffer { | ||
73 | struct list_head list; | ||
74 | unsigned int cons; | ||
75 | unsigned int len; | ||
76 | char msg[]; | ||
77 | }; | ||
78 | |||
79 | struct xenbus_file_priv { | ||
80 | /* | ||
81 | * msgbuffer_mutex is held while partial requests are built up | ||
82 | * and complete requests are acted on. It therefore protects | ||
83 | * the "transactions" and "watches" lists, and the partial | ||
84 | * request length and buffer. | ||
85 | * | ||
86 | * reply_mutex protects the reply being built up to return to | ||
87 | * usermode. It nests inside msgbuffer_mutex but may be held | ||
88 | * alone during a watch callback. | ||
89 | */ | ||
90 | struct mutex msgbuffer_mutex; | ||
91 | |||
92 | /* In-progress transactions */ | ||
93 | struct list_head transactions; | ||
94 | |||
95 | /* Active watches. */ | ||
96 | struct list_head watches; | ||
97 | |||
98 | /* Partial request. */ | ||
99 | unsigned int len; | ||
100 | union { | ||
101 | struct xsd_sockmsg msg; | ||
102 | char buffer[PAGE_SIZE]; | ||
103 | } u; | ||
104 | |||
105 | /* Response queue. */ | ||
106 | struct mutex reply_mutex; | ||
107 | struct list_head read_buffers; | ||
108 | wait_queue_head_t read_waitq; | ||
109 | |||
110 | }; | ||
111 | |||
112 | /* Read out any raw xenbus messages queued up. */ | ||
113 | static ssize_t xenbus_file_read(struct file *filp, | ||
114 | char __user *ubuf, | ||
115 | size_t len, loff_t *ppos) | ||
116 | { | ||
117 | struct xenbus_file_priv *u = filp->private_data; | ||
118 | struct read_buffer *rb; | ||
119 | unsigned i; | ||
120 | int ret; | ||
121 | |||
122 | mutex_lock(&u->reply_mutex); | ||
123 | while (list_empty(&u->read_buffers)) { | ||
124 | mutex_unlock(&u->reply_mutex); | ||
125 | ret = wait_event_interruptible(u->read_waitq, | ||
126 | !list_empty(&u->read_buffers)); | ||
127 | if (ret) | ||
128 | return ret; | ||
129 | mutex_lock(&u->reply_mutex); | ||
130 | } | ||
131 | |||
132 | rb = list_entry(u->read_buffers.next, struct read_buffer, list); | ||
133 | i = 0; | ||
134 | while (i < len) { | ||
135 | unsigned sz = min((unsigned)len - i, rb->len - rb->cons); | ||
136 | |||
137 | ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); | ||
138 | |||
139 | i += sz - ret; | ||
140 | rb->cons += sz - ret; | ||
141 | |||
142 | if (ret != sz) { | ||
143 | if (i == 0) | ||
144 | i = -EFAULT; | ||
145 | goto out; | ||
146 | } | ||
147 | |||
148 | /* Clear out buffer if it has been consumed */ | ||
149 | if (rb->cons == rb->len) { | ||
150 | list_del(&rb->list); | ||
151 | kfree(rb); | ||
152 | if (list_empty(&u->read_buffers)) | ||
153 | break; | ||
154 | rb = list_entry(u->read_buffers.next, | ||
155 | struct read_buffer, list); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | out: | ||
160 | mutex_unlock(&u->reply_mutex); | ||
161 | return i; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Add a buffer to the queue. Caller must hold the appropriate lock | ||
166 | * if the queue is not local. (Commonly the caller will build up | ||
167 | * multiple queued buffers on a temporary local list, and then add them | ||
168 | * to the appropriate list under lock once all the buffers have been | ||
169 | * successfully allocated.) | ||
170 | */ | ||
171 | static int queue_reply(struct list_head *queue, const void *data, size_t len) | ||
172 | { | ||
173 | struct read_buffer *rb; | ||
174 | |||
175 | if (len == 0) | ||
176 | return 0; | ||
177 | |||
178 | rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); | ||
179 | if (rb == NULL) | ||
180 | return -ENOMEM; | ||
181 | |||
182 | rb->cons = 0; | ||
183 | rb->len = len; | ||
184 | |||
185 | memcpy(rb->msg, data, len); | ||
186 | |||
187 | list_add_tail(&rb->list, queue); | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Free all the read_buffers on a list. | ||
193 | * Caller must have sole reference to list. | ||
194 | */ | ||
195 | static void queue_cleanup(struct list_head *list) | ||
196 | { | ||
197 | struct read_buffer *rb; | ||
198 | |||
199 | while (!list_empty(list)) { | ||
200 | rb = list_entry(list->next, struct read_buffer, list); | ||
201 | list_del(list->next); | ||
202 | kfree(rb); | ||
203 | } | ||
204 | } | ||
205 | |||
206 | struct watch_adapter { | ||
207 | struct list_head list; | ||
208 | struct xenbus_watch watch; | ||
209 | struct xenbus_file_priv *dev_data; | ||
210 | char *token; | ||
211 | }; | ||
212 | |||
213 | static void free_watch_adapter(struct watch_adapter *watch) | ||
214 | { | ||
215 | kfree(watch->watch.node); | ||
216 | kfree(watch->token); | ||
217 | kfree(watch); | ||
218 | } | ||
219 | |||
220 | static struct watch_adapter *alloc_watch_adapter(const char *path, | ||
221 | const char *token) | ||
222 | { | ||
223 | struct watch_adapter *watch; | ||
224 | |||
225 | watch = kzalloc(sizeof(*watch), GFP_KERNEL); | ||
226 | if (watch == NULL) | ||
227 | goto out_fail; | ||
228 | |||
229 | watch->watch.node = kstrdup(path, GFP_KERNEL); | ||
230 | if (watch->watch.node == NULL) | ||
231 | goto out_free; | ||
232 | |||
233 | watch->token = kstrdup(token, GFP_KERNEL); | ||
234 | if (watch->token == NULL) | ||
235 | goto out_free; | ||
236 | |||
237 | return watch; | ||
238 | |||
239 | out_free: | ||
240 | free_watch_adapter(watch); | ||
241 | |||
242 | out_fail: | ||
243 | return NULL; | ||
244 | } | ||
245 | |||
246 | static void watch_fired(struct xenbus_watch *watch, | ||
247 | const char **vec, | ||
248 | unsigned int len) | ||
249 | { | ||
250 | struct watch_adapter *adap; | ||
251 | struct xsd_sockmsg hdr; | ||
252 | const char *path, *token; | ||
253 | int path_len, tok_len, body_len, data_len = 0; | ||
254 | int ret; | ||
255 | LIST_HEAD(staging_q); | ||
256 | |||
257 | adap = container_of(watch, struct watch_adapter, watch); | ||
258 | |||
259 | path = vec[XS_WATCH_PATH]; | ||
260 | token = adap->token; | ||
261 | |||
262 | path_len = strlen(path) + 1; | ||
263 | tok_len = strlen(token) + 1; | ||
264 | if (len > 2) | ||
265 | data_len = vec[len] - vec[2] + 1; | ||
266 | body_len = path_len + tok_len + data_len; | ||
267 | |||
268 | hdr.type = XS_WATCH_EVENT; | ||
269 | hdr.len = body_len; | ||
270 | |||
271 | mutex_lock(&adap->dev_data->reply_mutex); | ||
272 | |||
273 | ret = queue_reply(&staging_q, &hdr, sizeof(hdr)); | ||
274 | if (!ret) | ||
275 | ret = queue_reply(&staging_q, path, path_len); | ||
276 | if (!ret) | ||
277 | ret = queue_reply(&staging_q, token, tok_len); | ||
278 | if (!ret && len > 2) | ||
279 | ret = queue_reply(&staging_q, vec[2], data_len); | ||
280 | |||
281 | if (!ret) { | ||
282 | /* success: pass reply list onto watcher */ | ||
283 | list_splice_tail(&staging_q, &adap->dev_data->read_buffers); | ||
284 | wake_up(&adap->dev_data->read_waitq); | ||
285 | } else | ||
286 | queue_cleanup(&staging_q); | ||
287 | |||
288 | mutex_unlock(&adap->dev_data->reply_mutex); | ||
289 | } | ||
290 | |||
291 | static int xenbus_write_transaction(unsigned msg_type, | ||
292 | struct xenbus_file_priv *u) | ||
293 | { | ||
294 | int rc = 0, ret; | ||
295 | void *reply; | ||
296 | struct xenbus_transaction_holder *trans = NULL; | ||
297 | LIST_HEAD(staging_q); | ||
298 | |||
299 | if (msg_type == XS_TRANSACTION_START) { | ||
300 | trans = kmalloc(sizeof(*trans), GFP_KERNEL); | ||
301 | if (!trans) { | ||
302 | rc = -ENOMEM; | ||
303 | goto out; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | reply = xenbus_dev_request_and_reply(&u->u.msg); | ||
308 | if (IS_ERR(reply)) { | ||
309 | kfree(trans); | ||
310 | rc = PTR_ERR(reply); | ||
311 | goto out; | ||
312 | } | ||
313 | |||
314 | if (msg_type == XS_TRANSACTION_START) { | ||
315 | trans->handle.id = simple_strtoul(reply, NULL, 0); | ||
316 | |||
317 | list_add(&trans->list, &u->transactions); | ||
318 | } else if (msg_type == XS_TRANSACTION_END) { | ||
319 | list_for_each_entry(trans, &u->transactions, list) | ||
320 | if (trans->handle.id == u->u.msg.tx_id) | ||
321 | break; | ||
322 | BUG_ON(&trans->list == &u->transactions); | ||
323 | list_del(&trans->list); | ||
324 | |||
325 | kfree(trans); | ||
326 | } | ||
327 | |||
328 | mutex_lock(&u->reply_mutex); | ||
329 | ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); | ||
330 | if (!ret) | ||
331 | ret = queue_reply(&staging_q, reply, u->u.msg.len); | ||
332 | if (!ret) { | ||
333 | list_splice_tail(&staging_q, &u->read_buffers); | ||
334 | wake_up(&u->read_waitq); | ||
335 | } else { | ||
336 | queue_cleanup(&staging_q); | ||
337 | rc = ret; | ||
338 | } | ||
339 | mutex_unlock(&u->reply_mutex); | ||
340 | |||
341 | kfree(reply); | ||
342 | |||
343 | out: | ||
344 | return rc; | ||
345 | } | ||
346 | |||
347 | static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) | ||
348 | { | ||
349 | struct watch_adapter *watch, *tmp_watch; | ||
350 | char *path, *token; | ||
351 | int err, rc; | ||
352 | LIST_HEAD(staging_q); | ||
353 | |||
354 | path = u->u.buffer + sizeof(u->u.msg); | ||
355 | token = memchr(path, 0, u->u.msg.len); | ||
356 | if (token == NULL) { | ||
357 | rc = -EILSEQ; | ||
358 | goto out; | ||
359 | } | ||
360 | token++; | ||
361 | |||
362 | if (msg_type == XS_WATCH) { | ||
363 | watch = alloc_watch_adapter(path, token); | ||
364 | if (watch == NULL) { | ||
365 | rc = -ENOMEM; | ||
366 | goto out; | ||
367 | } | ||
368 | |||
369 | watch->watch.callback = watch_fired; | ||
370 | watch->dev_data = u; | ||
371 | |||
372 | err = register_xenbus_watch(&watch->watch); | ||
373 | if (err) { | ||
374 | free_watch_adapter(watch); | ||
375 | rc = err; | ||
376 | goto out; | ||
377 | } | ||
378 | list_add(&watch->list, &u->watches); | ||
379 | } else { | ||
380 | list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { | ||
381 | if (!strcmp(watch->token, token) && | ||
382 | !strcmp(watch->watch.node, path)) { | ||
383 | unregister_xenbus_watch(&watch->watch); | ||
384 | list_del(&watch->list); | ||
385 | free_watch_adapter(watch); | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | } | ||
390 | |||
391 | /* Success. Synthesize a reply to say all is OK. */ | ||
392 | { | ||
393 | struct { | ||
394 | struct xsd_sockmsg hdr; | ||
395 | char body[3]; | ||
396 | } __packed reply = { | ||
397 | { | ||
398 | .type = msg_type, | ||
399 | .len = sizeof(reply.body) | ||
400 | }, | ||
401 | "OK" | ||
402 | }; | ||
403 | |||
404 | mutex_lock(&u->reply_mutex); | ||
405 | rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); | ||
406 | mutex_unlock(&u->reply_mutex); | ||
407 | } | ||
408 | |||
409 | out: | ||
410 | return rc; | ||
411 | } | ||
412 | |||
413 | static ssize_t xenbus_file_write(struct file *filp, | ||
414 | const char __user *ubuf, | ||
415 | size_t len, loff_t *ppos) | ||
416 | { | ||
417 | struct xenbus_file_priv *u = filp->private_data; | ||
418 | uint32_t msg_type; | ||
419 | int rc = len; | ||
420 | int ret; | ||
421 | LIST_HEAD(staging_q); | ||
422 | |||
423 | /* | ||
424 | * We're expecting usermode to be writing properly formed | ||
425 | * xenbus messages. If they write an incomplete message we | ||
426 | * buffer it up. Once it is complete, we act on it. | ||
427 | */ | ||
428 | |||
429 | /* | ||
430 | * Make sure concurrent writers can't stomp all over each | ||
431 | * other's messages and make a mess of our partial message | ||
432 | * buffer. We don't make any attempt to stop multiple | ||
433 | * writers from making a mess of each other's incomplete | ||
434 | * messages; we're just trying to guarantee our own internal | ||
435 | * consistency and make sure that single writes are handled | ||
436 | * atomically. | ||
437 | */ | ||
438 | mutex_lock(&u->msgbuffer_mutex); | ||
439 | |||
440 | /* Get this out of the way early to avoid confusion */ | ||
441 | if (len == 0) | ||
442 | goto out; | ||
443 | |||
444 | /* Can't write a xenbus message larger than we can buffer */ | ||
445 | if ((len + u->len) > sizeof(u->u.buffer)) { | ||
446 | /* On error, dump existing buffer */ | ||
447 | u->len = 0; | ||
448 | rc = -EINVAL; | ||
449 | goto out; | ||
450 | } | ||
451 | |||
452 | ret = copy_from_user(u->u.buffer + u->len, ubuf, len); | ||
453 | |||
454 | if (ret == len) { | ||
455 | rc = -EFAULT; | ||
456 | goto out; | ||
457 | } | ||
458 | |||
459 | /* Deal with a partial copy. */ | ||
460 | len -= ret; | ||
461 | rc = len; | ||
462 | |||
463 | u->len += len; | ||
464 | |||
465 | /* Return if we haven't got a full message yet */ | ||
466 | if (u->len < sizeof(u->u.msg)) | ||
467 | goto out; /* not even the header yet */ | ||
468 | |||
469 | /* If we're expecting a message that's larger than we can | ||
470 | possibly send, dump what we have and return an error. */ | ||
471 | if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) { | ||
472 | rc = -E2BIG; | ||
473 | u->len = 0; | ||
474 | goto out; | ||
475 | } | ||
476 | |||
477 | if (u->len < (sizeof(u->u.msg) + u->u.msg.len)) | ||
478 | goto out; /* incomplete data portion */ | ||
479 | |||
480 | /* | ||
481 | * OK, now we have a complete message. Do something with it. | ||
482 | */ | ||
483 | |||
484 | msg_type = u->u.msg.type; | ||
485 | |||
486 | switch (msg_type) { | ||
487 | case XS_TRANSACTION_START: | ||
488 | case XS_TRANSACTION_END: | ||
489 | case XS_DIRECTORY: | ||
490 | case XS_READ: | ||
491 | case XS_GET_PERMS: | ||
492 | case XS_RELEASE: | ||
493 | case XS_GET_DOMAIN_PATH: | ||
494 | case XS_WRITE: | ||
495 | case XS_MKDIR: | ||
496 | case XS_RM: | ||
497 | case XS_SET_PERMS: | ||
498 | /* Send out a transaction */ | ||
499 | ret = xenbus_write_transaction(msg_type, u); | ||
500 | break; | ||
501 | |||
502 | case XS_WATCH: | ||
503 | case XS_UNWATCH: | ||
504 | /* (Un)Ask for some path to be watched for changes */ | ||
505 | ret = xenbus_write_watch(msg_type, u); | ||
506 | break; | ||
507 | |||
508 | default: | ||
509 | ret = -EINVAL; | ||
510 | break; | ||
511 | } | ||
512 | if (ret != 0) | ||
513 | rc = ret; | ||
514 | |||
515 | /* Buffered message consumed */ | ||
516 | u->len = 0; | ||
517 | |||
518 | out: | ||
519 | mutex_unlock(&u->msgbuffer_mutex); | ||
520 | return rc; | ||
521 | } | ||
522 | |||
523 | static int xenbus_file_open(struct inode *inode, struct file *filp) | ||
524 | { | ||
525 | struct xenbus_file_priv *u; | ||
526 | |||
527 | if (xen_store_evtchn == 0) | ||
528 | return -ENOENT; | ||
529 | |||
530 | nonseekable_open(inode, filp); | ||
531 | |||
532 | u = kzalloc(sizeof(*u), GFP_KERNEL); | ||
533 | if (u == NULL) | ||
534 | return -ENOMEM; | ||
535 | |||
536 | INIT_LIST_HEAD(&u->transactions); | ||
537 | INIT_LIST_HEAD(&u->watches); | ||
538 | INIT_LIST_HEAD(&u->read_buffers); | ||
539 | init_waitqueue_head(&u->read_waitq); | ||
540 | |||
541 | mutex_init(&u->reply_mutex); | ||
542 | mutex_init(&u->msgbuffer_mutex); | ||
543 | |||
544 | filp->private_data = u; | ||
545 | |||
546 | return 0; | ||
547 | } | ||
548 | |||
549 | static int xenbus_file_release(struct inode *inode, struct file *filp) | ||
550 | { | ||
551 | struct xenbus_file_priv *u = filp->private_data; | ||
552 | struct xenbus_transaction_holder *trans, *tmp; | ||
553 | struct watch_adapter *watch, *tmp_watch; | ||
554 | |||
555 | /* | ||
556 | * No need for locking here because there are no other users, | ||
557 | * by definition. | ||
558 | */ | ||
559 | |||
560 | list_for_each_entry_safe(trans, tmp, &u->transactions, list) { | ||
561 | xenbus_transaction_end(trans->handle, 1); | ||
562 | list_del(&trans->list); | ||
563 | kfree(trans); | ||
564 | } | ||
565 | |||
566 | list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { | ||
567 | unregister_xenbus_watch(&watch->watch); | ||
568 | list_del(&watch->list); | ||
569 | free_watch_adapter(watch); | ||
570 | } | ||
571 | |||
572 | kfree(u); | ||
573 | |||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | static unsigned int xenbus_file_poll(struct file *file, poll_table *wait) | ||
578 | { | ||
579 | struct xenbus_file_priv *u = file->private_data; | ||
580 | |||
581 | poll_wait(file, &u->read_waitq, wait); | ||
582 | if (!list_empty(&u->read_buffers)) | ||
583 | return POLLIN | POLLRDNORM; | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | const struct file_operations xenbus_file_ops = { | ||
588 | .read = xenbus_file_read, | ||
589 | .write = xenbus_file_write, | ||
590 | .open = xenbus_file_open, | ||
591 | .release = xenbus_file_release, | ||
592 | .poll = xenbus_file_poll, | ||
593 | }; | ||
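xenbus_file_write() above only acts once a complete xsd_sockmsg header plus payload has been buffered, and xenbus_file_read() hands replies back with the same framing. Below is a hedged sketch of one XS_READ round trip through the compatibility path /proc/xen/xenbus; the path, the example key, the XS_READ value and the reply buffer size are assumptions drawn from xen/interface/io/xs_wire.h and the Kconfig text above.

/* Hypothetical sketch: read one xenstore key through the xenbus file.
 * Wire format: struct xsd_sockmsg header, then msg.len payload bytes;
 * the reply comes back with the same framing. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

struct xsd_sockmsg {               /* mirrors xen/interface/io/xs_wire.h */
        uint32_t type;
        uint32_t req_id;
        uint32_t tx_id;
        uint32_t len;
};
#define XS_READ 2                  /* assumed enum xsd_sockmsg_type value */

int main(void)
{
        const char key[] = "name"; /* illustrative key, relative to the domain path */
        struct xsd_sockmsg hdr = { .type = XS_READ, .len = sizeof(key) };
        char req[sizeof(hdr) + sizeof(key)];
        char reply[1024];
        ssize_t n;
        int fd = open("/proc/xen/xenbus", O_RDWR);

        if (fd < 0)
                return 1;

        /* The kernel acts only once the whole message has been buffered. */
        memcpy(req, &hdr, sizeof(hdr));
        memcpy(req + sizeof(hdr), key, sizeof(key));
        if (write(fd, req, sizeof(req)) != (ssize_t)sizeof(req))
                return 1;

        /* Reply: xsd_sockmsg header followed by the value. */
        n = read(fd, reply, sizeof(reply));
        if (n > (ssize_t)sizeof(hdr))
                printf("%.*s\n", (int)(n - sizeof(hdr)), reply + sizeof(hdr));

        close(fd);
        return 0;
}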
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h new file mode 100644 index 000000000000..51f08b2d0bf1 --- /dev/null +++ b/drivers/xen/xenfs/xenfs.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _XENFS_XENBUS_H | ||
2 | #define _XENFS_XENBUS_H | ||
3 | |||
4 | extern const struct file_operations xenbus_file_ops; | ||
5 | |||
6 | #endif /* _XENFS_XENBUS_H */ | ||