Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/sleep/main.c | 67
-rw-r--r--  drivers/acpi/sleep/proc.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 8
-rw-r--r--  drivers/base/topology.c | 3
-rw-r--r--  drivers/block/loop.c | 6
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/char/hw_random/Kconfig | 14
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 156
-rw-r--r--  drivers/char/pcmcia/Kconfig | 1
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 44
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 7
-rw-r--r--  drivers/char/tty_io.c | 14
-rw-r--r--  drivers/cpufreq/cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 2
-rw-r--r--  drivers/i2c/chips/tps65010.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 6
-rw-r--r--  drivers/kvm/kvm_main.c | 3
-rw-r--r--  drivers/mca/mca-bus.c | 28
-rw-r--r--  drivers/mca/mca-driver.c | 13
-rw-r--r--  drivers/md/Kconfig | 9
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/dm-bio-list.h | 26
-rw-r--r--  drivers/md/dm-crypt.c | 91
-rw-r--r--  drivers/md/dm-delay.c | 383
-rw-r--r--  drivers/md/dm-exception-store.c | 54
-rw-r--r--  drivers/md/dm-hw-handler.h | 1
-rw-r--r--  drivers/md/dm-io.c | 232
-rw-r--r--  drivers/md/dm-io.h | 83
-rw-r--r--  drivers/md/dm-log.c | 77
-rw-r--r--  drivers/md/dm-mpath.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 187
-rw-r--r--  drivers/md/dm-table.c | 10
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/kcopyd.c | 28
-rw-r--r--  drivers/md/md.c | 186
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/mmc/core/core.c | 4
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/tg3.c | 11
-rw-r--r--  drivers/net/tg3.h | 2
-rw-r--r--  drivers/spi/atmel_spi.c | 5
-rw-r--r--  drivers/usb/atm/usbatm.c | 2
-rw-r--r--  drivers/video/Kconfig | 28
-rw-r--r--  drivers/video/Makefile | 2
-rw-r--r--  drivers/video/arkfb.c | 1200
-rw-r--r--  drivers/video/fbmem.c | 4
-rw-r--r--  drivers/video/nvidia/nv_hw.c | 7
-rw-r--r--  drivers/video/nvidia/nvidia.c | 1
-rw-r--r--  drivers/video/s3fb.c | 19
-rw-r--r--  drivers/video/svgalib.c | 17
-rw-r--r--  drivers/video/vt8623fb.c | 927
55 files changed, 3521 insertions, 492 deletions
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index f8c63410bcbf..52b23471dd69 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = {
29 [PM_SUSPEND_ON] = ACPI_STATE_S0, 29 [PM_SUSPEND_ON] = ACPI_STATE_S0,
30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, 30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
31 [PM_SUSPEND_MEM] = ACPI_STATE_S3, 31 [PM_SUSPEND_MEM] = ACPI_STATE_S3,
32 [PM_SUSPEND_DISK] = ACPI_STATE_S4,
33 [PM_SUSPEND_MAX] = ACPI_STATE_S5 32 [PM_SUSPEND_MAX] = ACPI_STATE_S5
34}; 33};
35 34
@@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
94 do_suspend_lowlevel(); 93 do_suspend_lowlevel();
95 break; 94 break;
96 95
97 case PM_SUSPEND_DISK:
98 if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
99 status = acpi_enter_sleep_state(acpi_state);
100 break;
101 case PM_SUSPEND_MAX:
102 acpi_power_off();
103 break;
104
105 default: 96 default:
106 return -EINVAL; 97 return -EINVAL;
107 } 98 }
@@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state)
157 suspend_state_t states[] = { 148 suspend_state_t states[] = {
158 [1] = PM_SUSPEND_STANDBY, 149 [1] = PM_SUSPEND_STANDBY,
159 [3] = PM_SUSPEND_MEM, 150 [3] = PM_SUSPEND_MEM,
160 [4] = PM_SUSPEND_DISK,
161 [5] = PM_SUSPEND_MAX 151 [5] = PM_SUSPEND_MAX
162 }; 152 };
163 153
164 if (acpi_state < 6 && states[acpi_state]) 154 if (acpi_state < 6 && states[acpi_state])
165 return pm_suspend(states[acpi_state]); 155 return pm_suspend(states[acpi_state]);
156 if (acpi_state == 4)
157 return hibernate();
166 return -EINVAL; 158 return -EINVAL;
167} 159}
168 160
@@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = {
189 .finish = acpi_pm_finish, 181 .finish = acpi_pm_finish,
190}; 182};
191 183
184#ifdef CONFIG_SOFTWARE_SUSPEND
185static int acpi_hibernation_prepare(void)
186{
187 return acpi_sleep_prepare(ACPI_STATE_S4);
188}
189
190static int acpi_hibernation_enter(void)
191{
192 acpi_status status = AE_OK;
193 unsigned long flags = 0;
194
195 ACPI_FLUSH_CPU_CACHE();
196
197 local_irq_save(flags);
198 acpi_enable_wakeup_device(ACPI_STATE_S4);
199 /* This shouldn't return. If it returns, we have a problem */
200 status = acpi_enter_sleep_state(ACPI_STATE_S4);
201 local_irq_restore(flags);
202
203 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
204}
205
206static void acpi_hibernation_finish(void)
207{
208 acpi_leave_sleep_state(ACPI_STATE_S4);
209 acpi_disable_wakeup_device(ACPI_STATE_S4);
210
211 /* reset firmware waking vector */
212 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
213
214 if (init_8259A_after_S1) {
215 printk("Broken toshiba laptop -> kicking interrupts\n");
216 init_8259A(0);
217 }
218}
219
220static struct hibernation_ops acpi_hibernation_ops = {
221 .prepare = acpi_hibernation_prepare,
222 .enter = acpi_hibernation_enter,
223 .finish = acpi_hibernation_finish,
224};
225#endif /* CONFIG_SOFTWARE_SUSPEND */
226
192/* 227/*
193 * Toshiba fails to preserve interrupts over S1, reinitialization 228 * Toshiba fails to preserve interrupts over S1, reinitialization
194 * of 8259 is needed after S1 resume. 229 * of 8259 is needed after S1 resume.
@@ -227,14 +262,18 @@ int __init acpi_sleep_init(void)
227 sleep_states[i] = 1; 262 sleep_states[i] = 1;
228 printk(" S%d", i); 263 printk(" S%d", i);
229 } 264 }
230 if (i == ACPI_STATE_S4) {
231 if (sleep_states[i])
232 acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
233 }
234 } 265 }
235 printk(")\n"); 266 printk(")\n");
236 267
237 pm_set_ops(&acpi_pm_ops); 268 pm_set_ops(&acpi_pm_ops);
269
270#ifdef CONFIG_SOFTWARE_SUSPEND
271 if (sleep_states[ACPI_STATE_S4])
272 hibernation_set_ops(&acpi_hibernation_ops);
273#else
274 sleep_states[ACPI_STATE_S4] = 0;
275#endif
276
238 return 0; 277 return 0;
239} 278}
240 279
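
The upshot of the main.c changes: S4 handling moves out of the pm_ops suspend path and into a dedicated hibernation_ops registered with the hibernation core. A minimal sketch of how a platform would use the new interface (the my_* callbacks are illustrative, not from this patch):

#include <linux/suspend.h>

static int my_hibernation_prepare(void)
{
	/* quiesce platform firmware before the snapshot is written */
	return 0;
}

static int my_hibernation_enter(void)
{
	/* put the platform into S4; should not return on success */
	return 0;
}

static void my_hibernation_finish(void)
{
	/* undo prepare() after resume, or on failure */
}

static struct hibernation_ops my_hibernation_ops = {
	.prepare = my_hibernation_prepare,
	.enter   = my_hibernation_enter,
	.finish  = my_hibernation_finish,
};

/* registered once the platform knows S4 is usable:
 *	hibernation_set_ops(&my_hibernation_ops);
 */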
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 5a76e5be61d5..76b45f0b8341 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file,
60 state = simple_strtoul(str, NULL, 0); 60 state = simple_strtoul(str, NULL, 0);
61#ifdef CONFIG_SOFTWARE_SUSPEND 61#ifdef CONFIG_SOFTWARE_SUSPEND
62 if (state == 4) { 62 if (state == 4) {
63 error = pm_suspend(PM_SUSPEND_DISK); 63 error = hibernate();
64 goto Done; 64 goto Done;
65 } 65 }
66#endif 66#endif
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7950885d18e..fef87dd70d17 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
1316 spin_unlock_irqrestore(ap->lock, flags); 1316 spin_unlock_irqrestore(ap->lock, flags);
1317 1317
1318 DPRINTK("flush #1\n"); 1318 DPRINTK("flush #1\n");
1319 flush_workqueue(ata_wq); 1319 cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1320 1320
1321 /* 1321 /*
1322 * At this point, if a task is running, it's guaranteed to see 1322 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
1327 if (ata_msg_ctl(ap)) 1327 if (ata_msg_ctl(ap))
1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", 1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1329 __FUNCTION__); 1329 __FUNCTION__);
1330 flush_workqueue(ata_wq); 1330 cancel_work_sync(&ap->port_task.work);
1331 } 1331 }
1332 1332
1333 spin_lock_irqsave(ap->lock, flags); 1333 spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
6475 /* Flush hotplug task. The sequence is similar to 6475 /* Flush hotplug task. The sequence is similar to
6476 * ata_port_flush_task(). 6476 * ata_port_flush_task().
6477 */ 6477 */
6478 flush_workqueue(ata_aux_wq); 6478 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6479 cancel_delayed_work(&ap->hotplug_task); 6479 cancel_delayed_work(&ap->hotplug_task);
6480 flush_workqueue(ata_aux_wq); 6480 cancel_work_sync(&ap->hotplug_task.work);
6481 6481
6482 skip_eh: 6482 skip_eh:
6483 /* remove the associated SCSI host */ 6483 /* remove the associated SCSI host */
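
The libata change swaps flush_workqueue(), which drains every item on the shared queue, for cancel_work_sync(), which waits on (or cancels) only the one work item the port owns. The port-detach sequence above then becomes, in sketch form (struct and field names illustrative):

#include <linux/workqueue.h>

struct my_port {
	struct delayed_work task;
};

static void my_port_flush(struct my_port *p)
{
	/* wait for p's own work only; other ports' items on the
	 * same workqueue are unaffected */
	cancel_work_sync(&p->task.work);
	cancel_delayed_work(&p->task);
	cancel_work_sync(&p->task.work);
}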
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 067a9e8bc377..8d8cdfec6529 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
126 126
127 switch (action) { 127 switch (action) {
128 case CPU_UP_PREPARE: 128 case CPU_UP_PREPARE:
129 case CPU_UP_PREPARE_FROZEN:
129 rc = topology_add_dev(cpu); 130 rc = topology_add_dev(cpu);
130 break; 131 break;
131 case CPU_UP_CANCELED: 132 case CPU_UP_CANCELED:
133 case CPU_UP_CANCELED_FROZEN:
132 case CPU_DEAD: 134 case CPU_DEAD:
135 case CPU_DEAD_FROZEN:
133 topology_remove_dev(cpu); 136 topology_remove_dev(cpu);
134 break; 137 break;
135 } 138 }
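
topology.c is the first of several notifiers in this series (cpufreq, coretemp, ehca and kvm follow below) updated for the new CPU-hotplug *_FROZEN actions, which are delivered while tasks are frozen for suspend/resume and must be handled exactly like their regular counterparts. The recurring idiom, sketched with illustrative helpers:

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		my_add_dev(cpu);	/* illustrative helper */
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		my_remove_dev(cpu);	/* illustrative helper */
		break;
	}
	return NOTIFY_OK;
}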
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7cc..18cdd8c77626 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
243 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 243 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
244 bvec->bv_page, bv_offs, size, IV); 244 bvec->bv_page, bv_offs, size, IV);
245 if (unlikely(transfer_result)) { 245 if (unlikely(transfer_result)) {
246 char *kaddr;
247
248 /* 246 /*
249 * The transfer failed, but we still write the data to 247 * The transfer failed, but we still write the data to
250 * keep prepare/commit calls balanced. 248 * keep prepare/commit calls balanced.
251 */ 249 */
252 printk(KERN_ERR "loop: transfer error block %llu\n", 250 printk(KERN_ERR "loop: transfer error block %llu\n",
253 (unsigned long long)index); 251 (unsigned long long)index);
254 kaddr = kmap_atomic(page, KM_USER0); 252 zero_user_page(page, offset, size, KM_USER0);
255 memset(kaddr + offset, 0, size);
256 kunmap_atomic(kaddr, KM_USER0);
257 } 253 }
258 flush_dcache_page(page); 254 flush_dcache_page(page);
259 ret = aops->commit_write(file, page, offset, 255 ret = aops->commit_write(file, page, offset,
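
zero_user_page() simply packages the kmap_atomic()/memset()/kunmap_atomic() triple deleted above; roughly this (a sketch of the equivalence, not the helper's actual source):

static inline void zero_user_page_sketch(struct page *page, unsigned offset,
					 unsigned size, enum km_type km)
{
	char *kaddr = kmap_atomic(page, km);

	memset(kaddr + offset, 0, size);
	kunmap_atomic(kaddr, km);
}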
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796bef78f..069ae39a9cd9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -366,20 +366,25 @@ static struct disk_attribute pid_attr = {
366 .show = pid_show, 366 .show = pid_show,
367}; 367};
368 368
369static void nbd_do_it(struct nbd_device *lo) 369static int nbd_do_it(struct nbd_device *lo)
370{ 370{
371 struct request *req; 371 struct request *req;
372 int ret;
372 373
373 BUG_ON(lo->magic != LO_MAGIC); 374 BUG_ON(lo->magic != LO_MAGIC);
374 375
375 lo->pid = current->pid; 376 lo->pid = current->pid;
376 sysfs_create_file(&lo->disk->kobj, &pid_attr.attr); 377 ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
378 if (ret) {
379 printk(KERN_ERR "nbd: sysfs_create_file failed!");
380 return ret;
381 }
377 382
378 while ((req = nbd_read_stat(lo)) != NULL) 383 while ((req = nbd_read_stat(lo)) != NULL)
379 nbd_end_request(req); 384 nbd_end_request(req);
380 385
381 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr); 386 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
382 return; 387 return 0;
383} 388}
384 389
385static void nbd_clear_que(struct nbd_device *lo) 390static void nbd_clear_que(struct nbd_device *lo)
@@ -569,7 +574,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
569 case NBD_DO_IT: 574 case NBD_DO_IT:
570 if (!lo->file) 575 if (!lo->file)
571 return -EINVAL; 576 return -EINVAL;
572 nbd_do_it(lo); 577 error = nbd_do_it(lo);
578 if (error)
579 return error;
573 /* on return tidy up in case we have a signal */ 580 /* on return tidy up in case we have a signal */
574 /* Forcibly shutdown the socket causing all listeners 581 /* Forcibly shutdown the socket causing all listeners
575 * to error 582 * to error
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5f3acd8e64b8..7cda04b33534 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -91,3 +91,17 @@ config HW_RANDOM_OMAP
91 module will be called omap-rng. 91 module will be called omap-rng.
92 92
93 If unsure, say Y. 93 If unsure, say Y.
94
95config HW_RANDOM_PASEMI
96 tristate "PA Semi HW Random Number Generator support"
97 depends on HW_RANDOM && PPC_PASEMI
98 default HW_RANDOM
99 ---help---
100 This driver provides kernel-side support for the Random Number
101 Generator hardware found on PA6T-1682M processor.
102
103 To compile this driver as a module, choose M here: the
104 module will be called pasemi-rng.
105
106 If unsure, say Y.
107
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c41fa19454e3..c8b7300e2fb1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
13obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 000000000000..fa6040b6c8f2
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Maintained by: Olof Johansson <olof@lixom.net>
5 *
6 * Driver for the PWRficient onchip rng
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/platform_device.h>
25#include <linux/hw_random.h>
26#include <asm/of_platform.h>
27#include <asm/io.h>
28
29#define SDCRNG_CTL_REG 0x00
30#define SDCRNG_CTL_FVLD_M 0x0000f000
31#define SDCRNG_CTL_FVLD_S 12
32#define SDCRNG_CTL_KSZ 0x00000800
33#define SDCRNG_CTL_RSRC_CRG 0x00000010
34#define SDCRNG_CTL_RSRC_RRG 0x00000000
35#define SDCRNG_CTL_CE 0x00000004
36#define SDCRNG_CTL_RE 0x00000002
37#define SDCRNG_CTL_DR 0x00000001
38#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
39#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
40#define SDCRNG_VAL_REG 0x20
41
42#define MODULE_NAME "pasemi_rng"
43
44static int pasemi_rng_data_present(struct hwrng *rng)
45{
46 void __iomem *rng_regs = (void __iomem *)rng->priv;
47
48 return (in_le32(rng_regs + SDCRNG_CTL_REG)
49 & SDCRNG_CTL_FVLD_M) ? 1 : 0;
50}
51
52static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
53{
54 void __iomem *rng_regs = (void __iomem *)rng->priv;
55 *data = in_le32(rng_regs + SDCRNG_VAL_REG);
56 return 4;
57}
58
59static int pasemi_rng_init(struct hwrng *rng)
60{
61 void __iomem *rng_regs = (void __iomem *)rng->priv;
62 u32 ctl;
63
64 ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
65 out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
66 out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
67
68 return 0;
69}
70
71static void pasemi_rng_cleanup(struct hwrng *rng)
72{
73 void __iomem *rng_regs = (void __iomem *)rng->priv;
74 u32 ctl;
75
76 ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
77 out_le32(rng_regs + SDCRNG_CTL_REG,
78 in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
79}
80
81static struct hwrng pasemi_rng = {
82 .name = MODULE_NAME,
83 .init = pasemi_rng_init,
84 .cleanup = pasemi_rng_cleanup,
85 .data_present = pasemi_rng_data_present,
86 .data_read = pasemi_rng_data_read,
87};
88
89static int __devinit rng_probe(struct of_device *ofdev,
90 const struct of_device_id *match)
91{
92 void __iomem *rng_regs;
93 struct device_node *rng_np = ofdev->node;
94 struct resource res;
95 int err = 0;
96
97 err = of_address_to_resource(rng_np, 0, &res);
98 if (err)
99 return -ENODEV;
100
101 rng_regs = ioremap(res.start, 0x100);
102
103 if (!rng_regs)
104 return -ENOMEM;
105
106 pasemi_rng.priv = (unsigned long)rng_regs;
107
108 printk(KERN_INFO "Registering PA Semi RNG\n");
109
110 err = hwrng_register(&pasemi_rng);
111
112 if (err)
113 iounmap(rng_regs);
114
115 return err;
116}
117
118static int __devexit rng_remove(struct of_device *dev)
119{
120 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
121
122 hwrng_unregister(&pasemi_rng);
123 iounmap(rng_regs);
124
125 return 0;
126}
127
128static struct of_device_id rng_match[] = {
129 {
130 .compatible = "1682m-rng",
131 },
132 {},
133};
134
135static struct of_platform_driver rng_driver = {
136 .name = "pasemi-rng",
137 .match_table = rng_match,
138 .probe = rng_probe,
139 .remove = rng_remove,
140};
141
142static int __init rng_init(void)
143{
144 return of_register_platform_driver(&rng_driver);
145}
146module_init(rng_init);
147
148static void __exit rng_exit(void)
149{
150 of_unregister_platform_driver(&rng_driver);
151}
152module_exit(rng_exit);
153
154MODULE_LICENSE("GPL");
155MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
156MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 27c1179ee527..f25facd97bb4 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -21,6 +21,7 @@ config SYNCLINK_CS
21config CARDMAN_4000 21config CARDMAN_4000
22 tristate "Omnikey Cardman 4000 support" 22 tristate "Omnikey Cardman 4000 support"
23 depends on PCMCIA 23 depends on PCMCIA
24 select BITREVERSE
24 help 25 help
25 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard 26 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
26 reader. 27 reader.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4ea587983aef..fee58e03dbe2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/io.h> 36#include <asm/io.h>
36 37
@@ -194,41 +195,17 @@ static inline unsigned char xinb(unsigned short port)
194} 195}
195#endif 196#endif
196 197
197#define b_0000 15 198static inline unsigned char invert_revert(unsigned char ch)
198#define b_0001 14 199{
199#define b_0010 13 200 return bitrev8(~ch);
200#define b_0011 12 201}
201#define b_0100 11
202#define b_0101 10
203#define b_0110 9
204#define b_0111 8
205#define b_1000 7
206#define b_1001 6
207#define b_1010 5
208#define b_1011 4
209#define b_1100 3
210#define b_1101 2
211#define b_1110 1
212#define b_1111 0
213
214static unsigned char irtab[16] = {
215 b_0000, b_1000, b_0100, b_1100,
216 b_0010, b_1010, b_0110, b_1110,
217 b_0001, b_1001, b_0101, b_1101,
218 b_0011, b_1011, b_0111, b_1111
219};
220 202
221static void str_invert_revert(unsigned char *b, int len) 203static void str_invert_revert(unsigned char *b, int len)
222{ 204{
223 int i; 205 int i;
224 206
225 for (i = 0; i < len; i++) 207 for (i = 0; i < len; i++)
226 b[i] = (irtab[b[i] & 0x0f] << 4) | irtab[b[i] >> 4]; 208 b[i] = invert_revert(b[i]);
227}
228
229static unsigned char invert_revert(unsigned char ch)
230{
231 return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
232} 209}
233 210
234#define ATRLENCK(dev,pos) \ 211#define ATRLENCK(dev,pos) \
@@ -1881,8 +1858,11 @@ static int cm4000_probe(struct pcmcia_device *link)
1881 init_waitqueue_head(&dev->readq); 1858 init_waitqueue_head(&dev->readq);
1882 1859
1883 ret = cm4000_config(link, i); 1860 ret = cm4000_config(link, i);
1884 if (ret) 1861 if (ret) {
1862 dev_table[i] = NULL;
1863 kfree(dev);
1885 return ret; 1864 return ret;
1865 }
1886 1866
1887 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1867 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
1888 "cmm%d", i); 1868 "cmm%d", i);
@@ -1907,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1907 cm4000_release(link); 1887 cm4000_release(link);
1908 1888
1909 dev_table[devno] = NULL; 1889 dev_table[devno] = NULL;
1910 kfree(dev); 1890 kfree(dev);
1911 1891
1912 class_device_destroy(cmm_class, MKDEV(major, devno)); 1892 class_device_destroy(cmm_class, MKDEV(major, devno));
1913 1893
@@ -1956,12 +1936,14 @@ static int __init cmm_init(void)
1956 if (major < 0) { 1936 if (major < 0) {
1957 printk(KERN_WARNING MODULE_NAME 1937 printk(KERN_WARNING MODULE_NAME
1958 ": could not get major number\n"); 1938 ": could not get major number\n");
1939 class_destroy(cmm_class);
1959 return major; 1940 return major;
1960 } 1941 }
1961 1942
1962 rc = pcmcia_register_driver(&cm4000_driver); 1943 rc = pcmcia_register_driver(&cm4000_driver);
1963 if (rc < 0) { 1944 if (rc < 0) {
1964 unregister_chrdev(major, DEVICE_NAME); 1945 unregister_chrdev(major, DEVICE_NAME);
1946 class_destroy(cmm_class);
1965 return rc; 1947 return rc;
1966 } 1948 }
1967 1949
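
The bitrev8() rewrite in cm4000_cs.c drops a 16-entry nibble table whose entries were the inverted bit-reversal of their index, so invert_revert(ch) == bitrev8(~ch) for every byte value. A throwaway userspace check of that equivalence (illustrative only):

#include <assert.h>

static const unsigned char irtab[16] = {	/* table from the old code */
	15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0
};

static unsigned char old_invert_revert(unsigned char ch)
{
	return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
}

static unsigned char bitrev8(unsigned char b)	/* same result as the kernel's */
{
	b = (b >> 4) | (b << 4);
	b = ((b & 0xcc) >> 2) | ((b & 0x33) << 2);
	b = ((b & 0xaa) >> 1) | ((b & 0x55) << 1);
	return b;
}

int main(void)
{
	int ch;

	for (ch = 0; ch < 256; ch++)
		assert(old_invert_revert(ch) == bitrev8(~ch));
	return 0;
}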
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f2e4ec4fd407..af88181a17f4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -636,8 +636,11 @@ static int reader_probe(struct pcmcia_device *link)
636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0); 636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
637 637
638 ret = reader_config(link, i); 638 ret = reader_config(link, i);
639 if (ret) 639 if (ret) {
640 dev_table[i] = NULL;
641 kfree(dev);
640 return ret; 642 return ret;
643 }
641 644
642 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 645 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
643 "cmx%d", i); 646 "cmx%d", i);
@@ -708,12 +711,14 @@ static int __init cm4040_init(void)
708 if (major < 0) { 711 if (major < 0) {
709 printk(KERN_WARNING MODULE_NAME 712 printk(KERN_WARNING MODULE_NAME
710 ": could not get major number\n"); 713 ": could not get major number\n");
714 class_destroy(cmx_class);
711 return major; 715 return major;
712 } 716 }
713 717
714 rc = pcmcia_register_driver(&reader_driver); 718 rc = pcmcia_register_driver(&reader_driver);
715 if (rc < 0) { 719 if (rc < 0) {
716 unregister_chrdev(major, DEVICE_NAME); 720 unregister_chrdev(major, DEVICE_NAME);
721 class_destroy(cmx_class);
717 return rc; 722 return rc;
718 } 723 }
719 724
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f6ac1d316ea4..fc662e4ce58a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -934,13 +934,6 @@ restart:
934 return -EINVAL; 934 return -EINVAL;
935 935
936 /* 936 /*
937 * No more input please, we are switching. The new ldisc
938 * will update this value in the ldisc open function
939 */
940
941 tty->receive_room = 0;
942
943 /*
944 * Problem: What do we do if this blocks ? 937 * Problem: What do we do if this blocks ?
945 */ 938 */
946 939
@@ -951,6 +944,13 @@ restart:
951 return 0; 944 return 0;
952 } 945 }
953 946
947 /*
948 * No more input please, we are switching. The new ldisc
949 * will update this value in the ldisc open function
950 */
951
952 tty->receive_room = 0;
953
954 o_ldisc = tty->ldisc; 954 o_ldisc = tty->ldisc;
955 o_tty = tty->link; 955 o_tty = tty->link;
956 956
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 893dbaf386fb..eb37fba9b7ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1685 if (sys_dev) { 1685 if (sys_dev) {
1686 switch (action) { 1686 switch (action) {
1687 case CPU_ONLINE: 1687 case CPU_ONLINE:
1688 case CPU_ONLINE_FROZEN:
1688 cpufreq_add_dev(sys_dev); 1689 cpufreq_add_dev(sys_dev);
1689 break; 1690 break;
1690 case CPU_DOWN_PREPARE: 1691 case CPU_DOWN_PREPARE:
1692 case CPU_DOWN_PREPARE_FROZEN:
1691 if (unlikely(lock_policy_rwsem_write(cpu))) 1693 if (unlikely(lock_policy_rwsem_write(cpu)))
1692 BUG(); 1694 BUG();
1693 1695
@@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1699 __cpufreq_remove_dev(sys_dev); 1701 __cpufreq_remove_dev(sys_dev);
1700 break; 1702 break;
1701 case CPU_DOWN_FAILED: 1703 case CPU_DOWN_FAILED:
1704 case CPU_DOWN_FAILED_FROZEN:
1702 cpufreq_add_dev(sys_dev); 1705 cpufreq_add_dev(sys_dev);
1703 break; 1706 break;
1704 } 1707 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d1c7cac9316c..d2f0cbd8b8f3 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
313 313
314 switch (action) { 314 switch (action) {
315 case CPU_ONLINE: 315 case CPU_ONLINE:
316 case CPU_ONLINE_FROZEN:
316 cpufreq_update_policy(cpu); 317 cpufreq_update_policy(cpu);
317 break; 318 break;
318 case CPU_DEAD: 319 case CPU_DEAD:
320 case CPU_DEAD_FROZEN:
319 cpufreq_stats_free_table(cpu); 321 cpufreq_stats_free_table(cpu);
320 break; 322 break;
321 } 323 }
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 03b1f650d1c4..75e3911810a3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
309 309
310 switch (action) { 310 switch (action) {
311 case CPU_ONLINE: 311 case CPU_ONLINE:
312 case CPU_ONLINE_FROZEN:
312 coretemp_device_add(cpu); 313 coretemp_device_add(cpu);
313 break; 314 break;
314 case CPU_DEAD: 315 case CPU_DEAD:
316 case CPU_DEAD_FROZEN:
315 coretemp_device_remove(cpu); 317 coretemp_device_remove(cpu);
316 break; 318 break;
317 } 319 }
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 7ed92dc3d833..3c3f2ebf3fc9 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
354 * also needs to get error handling and probably 354 * also needs to get error handling and probably
355 * an #ifdef CONFIG_SOFTWARE_SUSPEND 355 * an #ifdef CONFIG_SOFTWARE_SUSPEND
356 */ 356 */
357 pm_suspend(PM_SUSPEND_DISK); 357 hibernate();
358#endif 358#endif
359 poll = 1; 359 poll = 1;
360 } 360 }
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index f284be1c9166..82dda2faf4d0 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
745 745
746 switch (action) { 746 switch (action) {
747 case CPU_UP_PREPARE: 747 case CPU_UP_PREPARE:
748 case CPU_UP_PREPARE_FROZEN:
748 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 749 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
749 if(!create_comp_task(pool, cpu)) { 750 if(!create_comp_task(pool, cpu)) {
750 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 751 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
@@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb,
752 } 753 }
753 break; 754 break;
754 case CPU_UP_CANCELED: 755 case CPU_UP_CANCELED:
756 case CPU_UP_CANCELED_FROZEN:
755 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 757 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
756 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 758 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
757 kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 759 kthread_bind(cct->task, any_online_cpu(cpu_online_map));
758 destroy_comp_task(pool, cpu); 760 destroy_comp_task(pool, cpu);
759 break; 761 break;
760 case CPU_ONLINE: 762 case CPU_ONLINE:
763 case CPU_ONLINE_FROZEN:
761 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); 764 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
762 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 765 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
763 kthread_bind(cct->task, cpu); 766 kthread_bind(cct->task, cpu);
764 wake_up_process(cct->task); 767 wake_up_process(cct->task);
765 break; 768 break;
766 case CPU_DOWN_PREPARE: 769 case CPU_DOWN_PREPARE:
770 case CPU_DOWN_PREPARE_FROZEN:
767 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); 771 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
768 break; 772 break;
769 case CPU_DOWN_FAILED: 773 case CPU_DOWN_FAILED:
774 case CPU_DOWN_FAILED_FROZEN:
770 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); 775 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
771 break; 776 break;
772 case CPU_DEAD: 777 case CPU_DEAD:
778 case CPU_DEAD_FROZEN:
773 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); 779 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
774 destroy_comp_task(pool, cpu); 780 destroy_comp_task(pool, cpu);
775 take_over_work(pool, cpu); 781 take_over_work(pool, cpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index c8b8cfa332bb..0d892600ff00 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2889 2889
2890 switch (val) { 2890 switch (val) {
2891 case CPU_DOWN_PREPARE: 2891 case CPU_DOWN_PREPARE:
2892 case CPU_DOWN_PREPARE_FROZEN:
2892 case CPU_UP_CANCELED: 2893 case CPU_UP_CANCELED:
2894 case CPU_UP_CANCELED_FROZEN:
2893 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2895 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2894 cpu); 2896 cpu);
2895 decache_vcpus_on_cpu(cpu); 2897 decache_vcpus_on_cpu(cpu);
@@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2897 NULL, 0, 1); 2899 NULL, 0, 1);
2898 break; 2900 break;
2899 case CPU_ONLINE: 2901 case CPU_ONLINE:
2902 case CPU_ONLINE_FROZEN:
2900 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2903 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2901 cpu); 2904 cpu);
2902 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 2905 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
index da862e4632dd..67b8e9453b19 100644
--- a/drivers/mca/mca-bus.c
+++ b/drivers/mca/mca-bus.c
@@ -47,19 +47,25 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv)
47{ 47{
48 struct mca_device *mca_dev = to_mca_device (dev); 48 struct mca_device *mca_dev = to_mca_device (dev);
49 struct mca_driver *mca_drv = to_mca_driver (drv); 49 struct mca_driver *mca_drv = to_mca_driver (drv);
50 const short *mca_ids = mca_drv->id_table; 50 const unsigned short *mca_ids = mca_drv->id_table;
51 int i; 51 int i = 0;
52 52
53 if (!mca_ids) 53 if (mca_ids) {
54 return 0; 54 for(i = 0; mca_ids[i]; i++) {
55 55 if (mca_ids[i] == mca_dev->pos_id) {
56 for(i = 0; mca_ids[i]; i++) { 56 mca_dev->index = i;
57 if (mca_ids[i] == mca_dev->pos_id) { 57 return 1;
58 mca_dev->index = i; 58 }
59 return 1;
60 } 59 }
61 } 60 }
62 61 /* If the integrated id is present, treat it as though it were an
62 * additional id in the id_table (it can't be because by definition,
63 * integrated id's overflow a short */
64 if (mca_drv->integrated_id && mca_dev->pos_id ==
65 mca_drv->integrated_id) {
66 mca_dev->index = i;
67 return 1;
68 }
63 return 0; 69 return 0;
64} 70}
65 71
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
index 2223466b3d8a..32cd39bcc715 100644
--- a/drivers/mca/mca-driver.c
+++ b/drivers/mca/mca-driver.c
@@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv)
36 mca_drv->driver.bus = &mca_bus_type; 36 mca_drv->driver.bus = &mca_bus_type;
37 if ((r = driver_register(&mca_drv->driver)) < 0) 37 if ((r = driver_register(&mca_drv->driver)) < 0)
38 return r; 38 return r;
39 mca_drv->integrated_id = 0;
39 } 40 }
40 41
41 return 0; 42 return 0;
42} 43}
43EXPORT_SYMBOL(mca_register_driver); 44EXPORT_SYMBOL(mca_register_driver);
44 45
46int mca_register_driver_integrated(struct mca_driver *mca_driver,
47 int integrated_id)
48{
49 int r = mca_register_driver(mca_driver);
50
51 if (!r)
52 mca_driver->integrated_id = integrated_id;
53
54 return r;
55}
56EXPORT_SYMBOL(mca_register_driver_integrated);
57
45void mca_unregister_driver(struct mca_driver *mca_drv) 58void mca_unregister_driver(struct mca_driver *mca_drv)
46{ 59{
47 if (MCA_bus) 60 if (MCA_bus)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4540ade6b6b5..7df934d69134 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -262,6 +262,15 @@ config DM_MULTIPATH_EMC
262 ---help--- 262 ---help---
263 Multipath support for EMC CX/AX series hardware. 263 Multipath support for EMC CX/AX series hardware.
264 264
265config DM_DELAY
266 tristate "I/O delaying target (EXPERIMENTAL)"
267 depends on BLK_DEV_DM && EXPERIMENTAL
268 ---help---
269 A target that delays reads and/or writes and can send
270 them to different devices. Useful for testing.
271
272 If unsure, say N.
273
265endmenu 274endmenu
266 275
267endif 276endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 34957a68d921..38754084eac7 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o 31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o 32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o 33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
34obj-$(CONFIG_DM_DELAY) += dm-delay.o
34obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
35obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o 36obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 37obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index da4349649f7f..c6be88826fae 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,17 +8,43 @@
8#define DM_BIO_LIST_H 8#define DM_BIO_LIST_H
9 9
10#include <linux/bio.h> 10#include <linux/bio.h>
11#include <linux/prefetch.h>
11 12
12struct bio_list { 13struct bio_list {
13 struct bio *head; 14 struct bio *head;
14 struct bio *tail; 15 struct bio *tail;
15}; 16};
16 17
18static inline int bio_list_empty(const struct bio_list *bl)
19{
20 return bl->head == NULL;
21}
22
23#define BIO_LIST_INIT { .head = NULL, .tail = NULL }
24
25#define BIO_LIST(bl) \
26 struct bio_list bl = BIO_LIST_INIT
27
17static inline void bio_list_init(struct bio_list *bl) 28static inline void bio_list_init(struct bio_list *bl)
18{ 29{
19 bl->head = bl->tail = NULL; 30 bl->head = bl->tail = NULL;
20} 31}
21 32
33#define bio_list_for_each(bio, bl) \
34 for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
35 bio = bio->bi_next)
36
37static inline unsigned bio_list_size(const struct bio_list *bl)
38{
39 unsigned sz = 0;
40 struct bio *bio;
41
42 bio_list_for_each(bio, bl)
43 sz++;
44
45 return sz;
46}
47
22static inline void bio_list_add(struct bio_list *bl, struct bio *bio) 48static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
23{ 49{
24 bio->bi_next = NULL; 50 bio->bi_next = NULL;
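
The new BIO_LIST()/bio_list_empty()/bio_list_size() helpers round out dm-bio-list.h as a small bio-queue API; dm-delay below is its first new user. Typical usage, sketched (my_* names illustrative):

static BIO_LIST(pending);		/* empty list: head = tail = NULL */

static void my_defer(struct bio *bio)
{
	bio_list_add(&pending, bio);	/* append at the tail */
}

static void my_drain(void)
{
	struct bio *bio = bio_list_get(&pending);  /* detach whole chain */

	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = next;
	}
}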
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c347..7b0fcfc9eaa5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
33struct crypt_io { 33struct crypt_io {
34 struct dm_target *target; 34 struct dm_target *target;
35 struct bio *base_bio; 35 struct bio *base_bio;
36 struct bio *first_clone;
37 struct work_struct work; 36 struct work_struct work;
38 atomic_t pending; 37 atomic_t pending;
39 int error; 38 int error;
@@ -107,6 +106,8 @@ struct crypt_config {
107 106
108static struct kmem_cache *_crypt_io_pool; 107static struct kmem_cache *_crypt_io_pool;
109 108
109static void clone_init(struct crypt_io *, struct bio *);
110
110/* 111/*
111 * Different IV generation algorithms: 112 * Different IV generation algorithms:
112 * 113 *
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
120 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 121 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
121 * (needed for LRW-32-AES and possible other narrow block modes) 122 * (needed for LRW-32-AES and possible other narrow block modes)
122 * 123 *
124 * null: the initial vector is always zero. Provides compatibility with
125 * obsolete loop_fish2 devices. Do not use for new devices.
126 *
123 * plumb: unimplemented, see: 127 * plumb: unimplemented, see:
124 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 128 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
125 */ 129 */
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
256 return 0; 260 return 0;
257} 261}
258 262
263static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
264{
265 memset(iv, 0, cc->iv_size);
266
267 return 0;
268}
269
259static struct crypt_iv_operations crypt_iv_plain_ops = { 270static struct crypt_iv_operations crypt_iv_plain_ops = {
260 .generator = crypt_iv_plain_gen 271 .generator = crypt_iv_plain_gen
261}; 272};
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
272 .generator = crypt_iv_benbi_gen 283 .generator = crypt_iv_benbi_gen
273}; 284};
274 285
286static struct crypt_iv_operations crypt_iv_null_ops = {
287 .generator = crypt_iv_null_gen
288};
289
275static int 290static int
276crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, 291crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
277 struct scatterlist *in, unsigned int length, 292 struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
378 * This should never violate the device limitations 393 * This should never violate the device limitations
379 * May return a smaller bio when running out of pages 394 * May return a smaller bio when running out of pages
380 */ 395 */
381static struct bio * 396static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
382crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
383 struct bio *base_bio, unsigned int *bio_vec_idx)
384{ 397{
398 struct crypt_config *cc = io->target->private;
385 struct bio *clone; 399 struct bio *clone;
386 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 400 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
387 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 401 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
388 unsigned int i; 402 unsigned int i;
389 403
390 if (base_bio) { 404 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
391 clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
392 __bio_clone(clone, base_bio);
393 } else
394 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
395
396 if (!clone) 405 if (!clone)
397 return NULL; 406 return NULL;
398 407
399 clone->bi_destructor = dm_crypt_bio_destructor; 408 clone_init(io, clone);
400
401 /* if the last bio was not complete, continue where that one ended */
402 clone->bi_idx = *bio_vec_idx;
403 clone->bi_vcnt = *bio_vec_idx;
404 clone->bi_size = 0;
405 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
406
407 /* clone->bi_idx pages have already been allocated */
408 size -= clone->bi_idx * PAGE_SIZE;
409 409
410 for (i = clone->bi_idx; i < nr_iovecs; i++) { 410 for (i = 0; i < nr_iovecs; i++) {
411 struct bio_vec *bv = bio_iovec_idx(clone, i); 411 struct bio_vec *bv = bio_iovec_idx(clone, i);
412 412
413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); 413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
419 * return a partially allocated bio, the caller will then try 419 * return a partially allocated bio, the caller will then try
420 * to allocate additional bios while submitting this partial bio 420 * to allocate additional bios while submitting this partial bio
421 */ 421 */
422 if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1)) 422 if (i == (MIN_BIO_PAGES - 1))
423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; 423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
424 424
425 bv->bv_offset = 0; 425 bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
438 return NULL; 438 return NULL;
439 } 439 }
440 440
441 /*
442 * Remember the last bio_vec allocated to be able
443 * to correctly continue after the splitting.
444 */
445 *bio_vec_idx = clone->bi_vcnt;
446
447 return clone; 441 return clone;
448} 442}
449 443
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
495 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
496 return; 490 return;
497 491
498 if (io->first_clone)
499 bio_put(io->first_clone);
500
501 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
502 493
503 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
562 clone->bi_end_io = crypt_endio; 553 clone->bi_end_io = crypt_endio;
563 clone->bi_bdev = cc->dev->bdev; 554 clone->bi_bdev = cc->dev->bdev;
564 clone->bi_rw = io->base_bio->bi_rw; 555 clone->bi_rw = io->base_bio->bi_rw;
556 clone->bi_destructor = dm_crypt_bio_destructor;
565} 557}
566 558
567static void process_read(struct crypt_io *io) 559static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
585 } 577 }
586 578
587 clone_init(io, clone); 579 clone_init(io, clone);
588 clone->bi_destructor = dm_crypt_bio_destructor;
589 clone->bi_idx = 0; 580 clone->bi_idx = 0;
590 clone->bi_vcnt = bio_segments(base_bio); 581 clone->bi_vcnt = bio_segments(base_bio);
591 clone->bi_size = base_bio->bi_size; 582 clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
604 struct convert_context ctx; 595 struct convert_context ctx;
605 unsigned remaining = base_bio->bi_size; 596 unsigned remaining = base_bio->bi_size;
606 sector_t sector = base_bio->bi_sector - io->target->begin; 597 sector_t sector = base_bio->bi_sector - io->target->begin;
607 unsigned bvec_idx = 0;
608 598
609 atomic_inc(&io->pending); 599 atomic_inc(&io->pending);
610 600
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
615 * so repeat the whole process until all the data can be handled. 605 * so repeat the whole process until all the data can be handled.
616 */ 606 */
617 while (remaining) { 607 while (remaining) {
618 clone = crypt_alloc_buffer(cc, base_bio->bi_size, 608 clone = crypt_alloc_buffer(io, remaining);
619 io->first_clone, &bvec_idx);
620 if (unlikely(!clone)) { 609 if (unlikely(!clone)) {
621 dec_pending(io, -ENOMEM); 610 dec_pending(io, -ENOMEM);
622 return; 611 return;
623 } 612 }
624 613
625 ctx.bio_out = clone; 614 ctx.bio_out = clone;
615 ctx.idx_out = 0;
626 616
627 if (unlikely(crypt_convert(cc, &ctx) < 0)) { 617 if (unlikely(crypt_convert(cc, &ctx) < 0)) {
628 crypt_free_buffer_pages(cc, clone, clone->bi_size); 618 crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
631 return; 621 return;
632 } 622 }
633 623
634 clone_init(io, clone); 624 /* crypt_convert should have filled the clone bio */
635 clone->bi_sector = cc->start + sector; 625 BUG_ON(ctx.idx_out < clone->bi_vcnt);
636
637 if (!io->first_clone) {
638 /*
639 * hold a reference to the first clone, because it
640 * holds the bio_vec array and that can't be freed
641 * before all other clones are released
642 */
643 bio_get(clone);
644 io->first_clone = clone;
645 }
646 626
627 clone->bi_sector = cc->start + sector;
647 remaining -= clone->bi_size; 628 remaining -= clone->bi_size;
648 sector += bio_sectors(clone); 629 sector += bio_sectors(clone);
649 630
650 /* prevent bio_put of first_clone */ 631 /* Grab another reference to the io struct
632 * before we kick off the request */
651 if (remaining) 633 if (remaining)
652 atomic_inc(&io->pending); 634 atomic_inc(&io->pending);
653 635
654 generic_make_request(clone); 636 generic_make_request(clone);
655 637
638 /* Do not reference clone after this - it
639 * may be gone already. */
640
656 /* out of memory -> run queues */ 641 /* out of memory -> run queues */
657 if (remaining) 642 if (remaining)
658 congestion_wait(bio_data_dir(clone), HZ/100); 643 congestion_wait(WRITE, HZ/100);
659 } 644 }
660} 645}
661 646
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
832 cc->iv_gen_ops = &crypt_iv_essiv_ops; 817 cc->iv_gen_ops = &crypt_iv_essiv_ops;
833 else if (strcmp(ivmode, "benbi") == 0) 818 else if (strcmp(ivmode, "benbi") == 0)
834 cc->iv_gen_ops = &crypt_iv_benbi_ops; 819 cc->iv_gen_ops = &crypt_iv_benbi_ops;
820 else if (strcmp(ivmode, "null") == 0)
821 cc->iv_gen_ops = &crypt_iv_null_ops;
835 else { 822 else {
836 ti->error = "Invalid IV mode"; 823 ti->error = "Invalid IV mode";
837 goto bad2; 824 goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
954 struct crypt_config *cc = ti->private; 941 struct crypt_config *cc = ti->private;
955 struct crypt_io *io; 942 struct crypt_io *io;
956 943
944 if (bio_barrier(bio))
945 return -EOPNOTSUPP;
946
957 io = mempool_alloc(cc->io_pool, GFP_NOIO); 947 io = mempool_alloc(cc->io_pool, GFP_NOIO);
958 io->target = ti; 948 io->target = ti;
959 io->base_bio = bio; 949 io->base_bio = bio;
960 io->first_clone = NULL;
961 io->error = io->post_process = 0; 950 io->error = io->post_process = 0;
962 atomic_set(&io->pending, 0); 951 atomic_set(&io->pending, 0);
963 kcryptd_queue_io(io); 952 kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
1057 1046
1058static struct target_type crypt_target = { 1047static struct target_type crypt_target = {
1059 .name = "crypt", 1048 .name = "crypt",
1060 .version= {1, 3, 0}, 1049 .version= {1, 5, 0},
1061 .module = THIS_MODULE, 1050 .module = THIS_MODULE,
1062 .ctr = crypt_ctr, 1051 .ctr = crypt_ctr,
1063 .dtr = crypt_dtr, 1052 .dtr = crypt_dtr,
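
Two user-visible changes in dm-crypt: barrier bios are now rejected with -EOPNOTSUPP, and the new "null" IV generator exists for compatibility with obsolete loop_fish2 devices. With the latter, a mapping table line might look like this (device, length and key are placeholders, not from this patch):

# illustrative only: map 1024 sectors through dm-crypt with the null IV
echo "0 1024 crypt blowfish-cbc-null <hex-key> 0 /dev/sdb1 0" | dmsetup create legacy0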
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
new file mode 100644
index 000000000000..52c7cf9e5803
--- /dev/null
+++ b/drivers/md/dm-delay.c
@@ -0,0 +1,383 @@
1/*
2 * Copyright (C) 2005-2007 Red Hat GmbH
3 *
4 * A target that delays reads and/or writes and can send
5 * them to different devices.
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/bio.h>
14#include <linux/slab.h>
15
16#include "dm.h"
17#include "dm-bio-list.h"
18
19#define DM_MSG_PREFIX "delay"
20
21struct delay_c {
22 struct timer_list delay_timer;
23 struct semaphore timer_lock;
24 struct work_struct flush_expired_bios;
25 struct list_head delayed_bios;
26 atomic_t may_delay;
27 mempool_t *delayed_pool;
28
29 struct dm_dev *dev_read;
30 sector_t start_read;
31 unsigned read_delay;
32 unsigned reads;
33
34 struct dm_dev *dev_write;
35 sector_t start_write;
36 unsigned write_delay;
37 unsigned writes;
38};
39
40struct delay_info {
41 struct delay_c *context;
42 struct list_head list;
43 struct bio *bio;
44 unsigned long expires;
45};
46
47static DEFINE_MUTEX(delayed_bios_lock);
48
49static struct workqueue_struct *kdelayd_wq;
50static struct kmem_cache *delayed_cache;
51
52static void handle_delayed_timer(unsigned long data)
53{
54 struct delay_c *dc = (struct delay_c *)data;
55
56 queue_work(kdelayd_wq, &dc->flush_expired_bios);
57}
58
59static void queue_timeout(struct delay_c *dc, unsigned long expires)
60{
61 down(&dc->timer_lock);
62
63 if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
64 mod_timer(&dc->delay_timer, expires);
65
66 up(&dc->timer_lock);
67}
68
69static void flush_bios(struct bio *bio)
70{
71 struct bio *n;
72
73 while (bio) {
74 n = bio->bi_next;
75 bio->bi_next = NULL;
76 generic_make_request(bio);
77 bio = n;
78 }
79}
80
81static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
82{
83 struct delay_info *delayed, *next;
84 unsigned long next_expires = 0;
85 int start_timer = 0;
86 BIO_LIST(flush_bios);
87
88 mutex_lock(&delayed_bios_lock);
89 list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
90 if (flush_all || time_after_eq(jiffies, delayed->expires)) {
91 list_del(&delayed->list);
92 bio_list_add(&flush_bios, delayed->bio);
93 if ((bio_data_dir(delayed->bio) == WRITE))
94 delayed->context->writes--;
95 else
96 delayed->context->reads--;
97 mempool_free(delayed, dc->delayed_pool);
98 continue;
99 }
100
101 if (!start_timer) {
102 start_timer = 1;
103 next_expires = delayed->expires;
104 } else
105 next_expires = min(next_expires, delayed->expires);
106 }
107
108 mutex_unlock(&delayed_bios_lock);
109
110 if (start_timer)
111 queue_timeout(dc, next_expires);
112
113 return bio_list_get(&flush_bios);
114}
115
116static void flush_expired_bios(struct work_struct *work)
117{
118 struct delay_c *dc;
119
120 dc = container_of(work, struct delay_c, flush_expired_bios);
121 flush_bios(flush_delayed_bios(dc, 0));
122}
123
124/*
125 * Mapping parameters:
126 * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
127 *
128 * With separate write parameters, the first set is only used for reads.
129 * Delays are specified in milliseconds.
130 */
131static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
132{
133 struct delay_c *dc;
134 unsigned long long tmpll;
135
136 if (argc != 3 && argc != 6) {
137 ti->error = "requires exactly 3 or 6 arguments";
138 return -EINVAL;
139 }
140
141 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
142 if (!dc) {
143 ti->error = "Cannot allocate context";
144 return -ENOMEM;
145 }
146
147 dc->reads = dc->writes = 0;
148
149 if (sscanf(argv[1], "%llu", &tmpll) != 1) {
150 ti->error = "Invalid device sector";
151 goto bad;
152 }
153 dc->start_read = tmpll;
154
155 if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
156 ti->error = "Invalid delay";
157 goto bad;
158 }
159
160 if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
161 dm_table_get_mode(ti->table), &dc->dev_read)) {
162 ti->error = "Device lookup failed";
163 goto bad;
164 }
165
166 if (argc == 3) {
167 dc->dev_write = NULL;
168 goto out;
169 }
170
171 if (sscanf(argv[4], "%llu", &tmpll) != 1) {
172 ti->error = "Invalid write device sector";
173 goto bad;
174 }
175 dc->start_write = tmpll;
176
177 if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
178 ti->error = "Invalid write delay";
179 goto bad;
180 }
181
182 if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
183 dm_table_get_mode(ti->table), &dc->dev_write)) {
184 ti->error = "Write device lookup failed";
185 dm_put_device(ti, dc->dev_read);
186 goto bad;
187 }
188
189out:
190 dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
191 if (!dc->delayed_pool) {
192 DMERR("Couldn't create delayed bio pool.");
193 goto bad;
194 }
195
196 init_timer(&dc->delay_timer);
197 dc->delay_timer.function = handle_delayed_timer;
198 dc->delay_timer.data = (unsigned long)dc;
199
200 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
201 INIT_LIST_HEAD(&dc->delayed_bios);
202 init_MUTEX(&dc->timer_lock);
203 atomic_set(&dc->may_delay, 1);
204
205 ti->private = dc;
206 return 0;
207
208bad:
209 kfree(dc);
210 return -EINVAL;
211}
212
213static void delay_dtr(struct dm_target *ti)
214{
215 struct delay_c *dc = ti->private;
216
217 flush_workqueue(kdelayd_wq);
218
219 dm_put_device(ti, dc->dev_read);
220
221 if (dc->dev_write)
222 dm_put_device(ti, dc->dev_write);
223
224 mempool_destroy(dc->delayed_pool);
225 kfree(dc);
226}
227
228static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
229{
230 struct delay_info *delayed;
231 unsigned long expires = 0;
232
233 if (!delay || !atomic_read(&dc->may_delay))
234 return 1;
235
236 delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
237
238 delayed->context = dc;
239 delayed->bio = bio;
240 delayed->expires = expires = jiffies + (delay * HZ / 1000);
241
242 mutex_lock(&delayed_bios_lock);
243
244 if (bio_data_dir(bio) == WRITE)
245 dc->writes++;
246 else
247 dc->reads++;
248
249 list_add_tail(&delayed->list, &dc->delayed_bios);
250
251 mutex_unlock(&delayed_bios_lock);
252
253 queue_timeout(dc, expires);
254
255 return 0;
256}
257
258static void delay_presuspend(struct dm_target *ti)
259{
260 struct delay_c *dc = ti->private;
261
262 atomic_set(&dc->may_delay, 0);
263 del_timer_sync(&dc->delay_timer);
264 flush_bios(flush_delayed_bios(dc, 1));
265}
266
267static void delay_resume(struct dm_target *ti)
268{
269 struct delay_c *dc = ti->private;
270
271 atomic_set(&dc->may_delay, 1);
272}
273
274static int delay_map(struct dm_target *ti, struct bio *bio,
275 union map_info *map_context)
276{
277 struct delay_c *dc = ti->private;
278
279 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
280 bio->bi_bdev = dc->dev_write->bdev;
281 bio->bi_sector = dc->start_write +
282 (bio->bi_sector - ti->begin);
283
284 return delay_bio(dc, dc->write_delay, bio);
285 }
286
287 bio->bi_bdev = dc->dev_read->bdev;
288 bio->bi_sector = dc->start_read +
289 (bio->bi_sector - ti->begin);
290
291 return delay_bio(dc, dc->read_delay, bio);
292}
293
294static int delay_status(struct dm_target *ti, status_type_t type,
295 char *result, unsigned maxlen)
296{
297 struct delay_c *dc = ti->private;
298 int sz = 0;
299
300 switch (type) {
301 case STATUSTYPE_INFO:
302 DMEMIT("%u %u", dc->reads, dc->writes);
303 break;
304
305 case STATUSTYPE_TABLE:
306 DMEMIT("%s %llu %u", dc->dev_read->name,
307 (unsigned long long) dc->start_read,
308 dc->read_delay);
309 if (dc->dev_write)
310 DMEMIT("%s %llu %u", dc->dev_write->name,
311 (unsigned long long) dc->start_write,
312 dc->write_delay);
313 break;
314 }
315
316 return 0;
317}
318
319static struct target_type delay_target = {
320 .name = "delay",
321 .version = {1, 0, 2},
322 .module = THIS_MODULE,
323 .ctr = delay_ctr,
324 .dtr = delay_dtr,
325 .map = delay_map,
326 .presuspend = delay_presuspend,
327 .resume = delay_resume,
328 .status = delay_status,
329};
330
331static int __init dm_delay_init(void)
332{
333 int r = -ENOMEM;
334
335 kdelayd_wq = create_workqueue("kdelayd");
336 if (!kdelayd_wq) {
337 DMERR("Couldn't start kdelayd");
338 goto bad_queue;
339 }
340
341 delayed_cache = kmem_cache_create("dm-delay",
342 sizeof(struct delay_info),
343 __alignof__(struct delay_info),
344 0, NULL, NULL);
345 if (!delayed_cache) {
346 DMERR("Couldn't create delayed bio cache.");
347 goto bad_memcache;
348 }
349
350 r = dm_register_target(&delay_target);
351 if (r < 0) {
352 DMERR("register failed %d", r);
353 goto bad_register;
354 }
355
356 return 0;
357
358bad_register:
359 kmem_cache_destroy(delayed_cache);
360bad_memcache:
361 destroy_workqueue(kdelayd_wq);
362bad_queue:
363 return r;
364}
365
366static void __exit dm_delay_exit(void)
367{
368 int r = dm_unregister_target(&delay_target);
369
370 if (r < 0)
371 DMERR("unregister failed %d", r);
372
373 kmem_cache_destroy(delayed_cache);
374 destroy_workqueue(kdelayd_wq);
375}
376
377/* Module hooks */
378module_init(dm_delay_init);
379module_exit(dm_delay_exit);
380
381MODULE_DESCRIPTION(DM_NAME " delay target");
382MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
383MODULE_LICENSE("GPL");
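
Per the constructor comment, the delay target takes <device> <offset> <delay>, plus an optional <write_device> <write_offset> <write_delay> triple, with delays in milliseconds. Illustrative invocations (device names are examples):

# delay every bio to /dev/sdb by 500 ms
echo "0 $(blockdev --getsz /dev/sdb) delay /dev/sdb 0 500" | dmsetup create delayed

# pass reads through untouched, delay writes by 200 ms
echo "0 $(blockdev --getsz /dev/sdb) delay /dev/sdb 0 0 /dev/sdb 0 200" | dmsetup create wdelayed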
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 99cdffa7fbfe..07e0a0c84f6e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * dm-snapshot.c 2 * dm-exception-store.c
3 * 3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited. 4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 * Copyright (C) 2006 Red Hat GmbH
5 * 6 *
6 * This file is released under the GPL. 7 * This file is released under the GPL.
7 */ 8 */
@@ -123,6 +124,7 @@ struct pstore {
123 atomic_t pending_count; 124 atomic_t pending_count;
124 uint32_t callback_count; 125 uint32_t callback_count;
125 struct commit_callback *callbacks; 126 struct commit_callback *callbacks;
127 struct dm_io_client *io_client;
126}; 128};
127 129
128static inline unsigned int sectors_to_pages(unsigned int sectors) 130static inline unsigned int sectors_to_pages(unsigned int sectors)
@@ -159,14 +161,20 @@ static void free_area(struct pstore *ps)
159 */ 161 */
160static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) 162static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
161{ 163{
162 struct io_region where; 164 struct io_region where = {
163 unsigned long bits; 165 .bdev = ps->snap->cow->bdev,
164 166 .sector = ps->snap->chunk_size * chunk,
165 where.bdev = ps->snap->cow->bdev; 167 .count = ps->snap->chunk_size,
166 where.sector = ps->snap->chunk_size * chunk; 168 };
167 where.count = ps->snap->chunk_size; 169 struct dm_io_request io_req = {
168 170 .bi_rw = rw,
169 return dm_io_sync_vm(1, &where, rw, ps->area, &bits); 171 .mem.type = DM_IO_VMA,
172 .mem.ptr.vma = ps->area,
173 .client = ps->io_client,
174 .notify.fn = NULL,
175 };
176
177 return dm_io(&io_req, 1, &where, NULL);
170} 178}
171 179
172/* 180/*
@@ -213,17 +221,18 @@ static int read_header(struct pstore *ps, int *new_snapshot)
213 chunk_size_supplied = 0; 221 chunk_size_supplied = 0;
214 } 222 }
215 223
216 r = dm_io_get(sectors_to_pages(ps->snap->chunk_size)); 224 ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
217 if (r) 225 chunk_size));
218 return r; 226 if (IS_ERR(ps->io_client))
227 return PTR_ERR(ps->io_client);
219 228
220 r = alloc_area(ps); 229 r = alloc_area(ps);
221 if (r) 230 if (r)
222 goto bad1; 231 return r;
223 232
224 r = chunk_io(ps, 0, READ); 233 r = chunk_io(ps, 0, READ);
225 if (r) 234 if (r)
226 goto bad2; 235 goto bad;
227 236
228 dh = (struct disk_header *) ps->area; 237 dh = (struct disk_header *) ps->area;
229 238
@@ -235,7 +244,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
235 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { 244 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
236 DMWARN("Invalid or corrupt snapshot"); 245 DMWARN("Invalid or corrupt snapshot");
237 r = -ENXIO; 246 r = -ENXIO;
238 goto bad2; 247 goto bad;
239 } 248 }
240 249
241 *new_snapshot = 0; 250 *new_snapshot = 0;
@@ -252,27 +261,22 @@ static int read_header(struct pstore *ps, int *new_snapshot)
252 (unsigned long long)ps->snap->chunk_size); 261 (unsigned long long)ps->snap->chunk_size);
253 262
254 /* We had a bogus chunk_size. Fix stuff up. */ 263 /* We had a bogus chunk_size. Fix stuff up. */
255 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
256 free_area(ps); 264 free_area(ps);
257 265
258 ps->snap->chunk_size = chunk_size; 266 ps->snap->chunk_size = chunk_size;
259 ps->snap->chunk_mask = chunk_size - 1; 267 ps->snap->chunk_mask = chunk_size - 1;
260 ps->snap->chunk_shift = ffs(chunk_size) - 1; 268 ps->snap->chunk_shift = ffs(chunk_size) - 1;
261 269
262 r = dm_io_get(sectors_to_pages(chunk_size)); 270 r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
271 ps->io_client);
263 if (r) 272 if (r)
264 return r; 273 return r;
265 274
266 r = alloc_area(ps); 275 r = alloc_area(ps);
267 if (r) 276 return r;
268 goto bad1;
269
270 return 0;
271 277
272bad2: 278bad:
273 free_area(ps); 279 free_area(ps);
274bad1:
275 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
276 return r; 280 return r;
277} 281}
278 282
@@ -405,7 +409,7 @@ static void persistent_destroy(struct exception_store *store)
405{ 409{
406 struct pstore *ps = get_info(store); 410 struct pstore *ps = get_info(store);
407 411
408 dm_io_put(sectors_to_pages(ps->snap->chunk_size)); 412 dm_io_client_destroy(ps->io_client);
409 vfree(ps->callbacks); 413 vfree(ps->callbacks);
410 free_area(ps); 414 free_area(ps);
411 kfree(ps); 415 kfree(ps);
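
The three hunks above share one pattern: the pstore now owns a dm_io_client sized in pages, resizes it in place when a bogus chunk_size is corrected, and tears it down in the destructor. A condensed sketch of that lifecycle, using only functions introduced by this patch (error paths abbreviated):

    struct dm_io_client *c;

    c = dm_io_client_create(sectors_to_pages(ps->snap->chunk_size));
    if (IS_ERR(c))
            return PTR_ERR(c);        /* create may block and may fail */
    ...
    /* chunk_size was wrong: grow or shrink the client's mempool in place */
    r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size), c);
    ...
    dm_io_client_destroy(c);          /* from persistent_destroy() */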
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 32eff28e4adc..e0832e6fcf36 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -16,6 +16,7 @@
16struct hw_handler_type; 16struct hw_handler_type;
17struct hw_handler { 17struct hw_handler {
18 struct hw_handler_type *type; 18 struct hw_handler_type *type;
19 struct mapped_device *md;
19 void *context; 20 void *context;
20}; 21};
21 22
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8bdc8a87b249..352c6fbeac53 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software 2 * Copyright (C) 2003 Sistina Software
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 */ 6 */
@@ -12,13 +13,17 @@
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14 15
15static struct bio_set *_bios; 16struct dm_io_client {
17 mempool_t *pool;
18 struct bio_set *bios;
19};
16 20
17/* FIXME: can we shrink this ? */ 21/* FIXME: can we shrink this ? */
18struct io { 22struct io {
19 unsigned long error; 23 unsigned long error;
20 atomic_t count; 24 atomic_t count;
21 struct task_struct *sleeper; 25 struct task_struct *sleeper;
26 struct dm_io_client *client;
22 io_notify_fn callback; 27 io_notify_fn callback;
23 void *context; 28 void *context;
24}; 29};
@@ -26,63 +31,58 @@ struct io {
26/* 31/*
27 * io contexts are only dynamically allocated for asynchronous 32 * io contexts are only dynamically allocated for asynchronous
28 * io. Since async io is likely to be the majority of io we'll 33 * io. Since async io is likely to be the majority of io we'll
29 * have the same number of io contexts as buffer heads ! (FIXME: 34 * have the same number of io contexts as bios! (FIXME: must reduce this).
30 * must reduce this).
31 */ 35 */
32static unsigned _num_ios;
33static mempool_t *_io_pool;
34 36
35static unsigned int pages_to_ios(unsigned int pages) 37static unsigned int pages_to_ios(unsigned int pages)
36{ 38{
37 return 4 * pages; /* too many ? */ 39 return 4 * pages; /* too many ? */
38} 40}
39 41
40static int resize_pool(unsigned int new_ios) 42/*
43 * Create a client with mempool and bioset.
44 */
45struct dm_io_client *dm_io_client_create(unsigned num_pages)
41{ 46{
42 int r = 0; 47 unsigned ios = pages_to_ios(num_pages);
43 48 struct dm_io_client *client;
44 if (_io_pool) {
45 if (new_ios == 0) {
46 /* free off the pool */
47 mempool_destroy(_io_pool);
48 _io_pool = NULL;
49 bioset_free(_bios);
50
51 } else {
52 /* resize the pool */
53 r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
54 }
55 49
56 } else { 50 client = kmalloc(sizeof(*client), GFP_KERNEL);
57 /* create new pool */ 51 if (!client)
58 _io_pool = mempool_create_kmalloc_pool(new_ios, 52 return ERR_PTR(-ENOMEM);
59 sizeof(struct io)); 53
60 if (!_io_pool) 54 client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
61 return -ENOMEM; 55 if (!client->pool)
62 56 goto bad;
63 _bios = bioset_create(16, 16);
64 if (!_bios) {
65 mempool_destroy(_io_pool);
66 _io_pool = NULL;
67 return -ENOMEM;
68 }
69 }
70 57
71 if (!r) 58 client->bios = bioset_create(16, 16);
72 _num_ios = new_ios; 59 if (!client->bios)
60 goto bad;
73 61
74 return r; 62 return client;
63
64 bad:
65 if (client->pool)
66 mempool_destroy(client->pool);
67 kfree(client);
68 return ERR_PTR(-ENOMEM);
75} 69}
70EXPORT_SYMBOL(dm_io_client_create);
76 71
77int dm_io_get(unsigned int num_pages) 72int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
78{ 73{
79 return resize_pool(_num_ios + pages_to_ios(num_pages)); 74 return mempool_resize(client->pool, pages_to_ios(num_pages),
75 GFP_KERNEL);
80} 76}
77EXPORT_SYMBOL(dm_io_client_resize);
81 78
82void dm_io_put(unsigned int num_pages) 79void dm_io_client_destroy(struct dm_io_client *client)
83{ 80{
84 resize_pool(_num_ios - pages_to_ios(num_pages)); 81 mempool_destroy(client->pool);
82 bioset_free(client->bios);
83 kfree(client);
85} 84}
85EXPORT_SYMBOL(dm_io_client_destroy);
86 86
87/*----------------------------------------------------------------- 87/*-----------------------------------------------------------------
88 * We need to keep track of which region a bio is doing io for. 88 * We need to keep track of which region a bio is doing io for.
@@ -118,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
118 io_notify_fn fn = io->callback; 118 io_notify_fn fn = io->callback;
119 void *context = io->context; 119 void *context = io->context;
120 120
121 mempool_free(io, _io_pool); 121 mempool_free(io, io->client->pool);
122 fn(r, context); 122 fn(r, context);
123 } 123 }
124 } 124 }
@@ -126,7 +126,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static int endio(struct bio *bio, unsigned int done, int error)
128{ 128{
129 struct io *io = (struct io *) bio->bi_private; 129 struct io *io;
130 unsigned region;
130 131
131 /* keep going until we've finished */ 132 /* keep going until we've finished */
132 if (bio->bi_size) 133 if (bio->bi_size)
@@ -135,10 +136,17 @@ static int endio(struct bio *bio, unsigned int done, int error)
135 if (error && bio_data_dir(bio) == READ) 136 if (error && bio_data_dir(bio) == READ)
136 zero_fill_bio(bio); 137 zero_fill_bio(bio);
137 138
138 dec_count(io, bio_get_region(bio), error); 139 /*
140 * The bio destructor in bio_put() may use the io object.
141 */
142 io = bio->bi_private;
143 region = bio_get_region(bio);
144
139 bio->bi_max_vecs++; 145 bio->bi_max_vecs++;
140 bio_put(bio); 146 bio_put(bio);
141 147
148 dec_count(io, region, error);
149
142 return 0; 150 return 0;
143} 151}
144 152
@@ -209,6 +217,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
209 dp->context_ptr = bvec; 217 dp->context_ptr = bvec;
210} 218}
211 219
220/*
221 * Functions for getting the pages from a VMA.
222 */
212static void vm_get_page(struct dpages *dp, 223static void vm_get_page(struct dpages *dp,
213 struct page **p, unsigned long *len, unsigned *offset) 224 struct page **p, unsigned long *len, unsigned *offset)
214{ 225{
@@ -233,7 +244,34 @@ static void vm_dp_init(struct dpages *dp, void *data)
233 244
234static void dm_bio_destructor(struct bio *bio) 245static void dm_bio_destructor(struct bio *bio)
235{ 246{
236 bio_free(bio, _bios); 247 struct io *io = bio->bi_private;
248
249 bio_free(bio, io->client->bios);
250}
251
252/*
253 * Functions for getting the pages from kernel memory.
254 */
255static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
256 unsigned *offset)
257{
258 *p = virt_to_page(dp->context_ptr);
259 *offset = dp->context_u;
260 *len = PAGE_SIZE - dp->context_u;
261}
262
263static void km_next_page(struct dpages *dp)
264{
265 dp->context_ptr += PAGE_SIZE - dp->context_u;
266 dp->context_u = 0;
267}
268
269static void km_dp_init(struct dpages *dp, void *data)
270{
271 dp->get_page = km_get_page;
272 dp->next_page = km_next_page;
273 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
274 dp->context_ptr = data;
237} 275}
238 276
239/*----------------------------------------------------------------- 277/*-----------------------------------------------------------------
@@ -256,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
256 * to hide it from bio_add_page(). 294 * to hide it from bio_add_page().
257 */ 295 */
258 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2; 296 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
259 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios); 297 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
260 bio->bi_sector = where->sector + (where->count - remaining); 298 bio->bi_sector = where->sector + (where->count - remaining);
261 bio->bi_bdev = where->bdev; 299 bio->bi_bdev = where->bdev;
262 bio->bi_end_io = endio; 300 bio->bi_end_io = endio;
@@ -311,8 +349,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
311 dec_count(io, 0, 0); 349 dec_count(io, 0, 0);
312} 350}
313 351
314static int sync_io(unsigned int num_regions, struct io_region *where, 352static int sync_io(struct dm_io_client *client, unsigned int num_regions,
315 int rw, struct dpages *dp, unsigned long *error_bits) 353 struct io_region *where, int rw, struct dpages *dp,
354 unsigned long *error_bits)
316{ 355{
317 struct io io; 356 struct io io;
318 357
@@ -324,6 +363,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
324 io.error = 0; 363 io.error = 0;
325 atomic_set(&io.count, 1); /* see dispatch_io() */ 364 atomic_set(&io.count, 1); /* see dispatch_io() */
326 io.sleeper = current; 365 io.sleeper = current;
366 io.client = client;
327 367
328 dispatch_io(rw, num_regions, where, dp, &io, 1); 368 dispatch_io(rw, num_regions, where, dp, &io, 1);
329 369
@@ -340,12 +380,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
340 if (atomic_read(&io.count)) 380 if (atomic_read(&io.count))
341 return -EINTR; 381 return -EINTR;
342 382
343 *error_bits = io.error; 383 if (error_bits)
384 *error_bits = io.error;
385
344 return io.error ? -EIO : 0; 386 return io.error ? -EIO : 0;
345} 387}
346 388
347static int async_io(unsigned int num_regions, struct io_region *where, int rw, 389static int async_io(struct dm_io_client *client, unsigned int num_regions,
348 struct dpages *dp, io_notify_fn fn, void *context) 390 struct io_region *where, int rw, struct dpages *dp,
391 io_notify_fn fn, void *context)
349{ 392{
350 struct io *io; 393 struct io *io;
351 394
@@ -355,10 +398,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
355 return -EIO; 398 return -EIO;
356 } 399 }
357 400
358 io = mempool_alloc(_io_pool, GFP_NOIO); 401 io = mempool_alloc(client->pool, GFP_NOIO);
359 io->error = 0; 402 io->error = 0;
360 atomic_set(&io->count, 1); /* see dispatch_io() */ 403 atomic_set(&io->count, 1); /* see dispatch_io() */
361 io->sleeper = NULL; 404 io->sleeper = NULL;
405 io->client = client;
362 io->callback = fn; 406 io->callback = fn;
363 io->context = context; 407 io->context = context;
364 408
@@ -366,61 +410,51 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
366 return 0; 410 return 0;
367} 411}
368 412
369int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 413static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
370 struct page_list *pl, unsigned int offset,
371 unsigned long *error_bits)
372{ 414{
373 struct dpages dp; 415 /* Set up dpages based on memory type */
374 list_dp_init(&dp, pl, offset); 416 switch (io_req->mem.type) {
375 return sync_io(num_regions, where, rw, &dp, error_bits); 417 case DM_IO_PAGE_LIST:
376} 418 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
419 break;
420
421 case DM_IO_BVEC:
422 bvec_dp_init(dp, io_req->mem.ptr.bvec);
423 break;
424
425 case DM_IO_VMA:
426 vm_dp_init(dp, io_req->mem.ptr.vma);
427 break;
428
429 case DM_IO_KMEM:
430 km_dp_init(dp, io_req->mem.ptr.addr);
431 break;
432
433 default:
434 return -EINVAL;
435 }
377 436
378int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, 437 return 0;
379 struct bio_vec *bvec, unsigned long *error_bits)
380{
381 struct dpages dp;
382 bvec_dp_init(&dp, bvec);
383 return sync_io(num_regions, where, rw, &dp, error_bits);
384} 438}
385 439
386int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, 440/*
387 void *data, unsigned long *error_bits) 441 * New collapsed (a)synchronous interface
442 */
443int dm_io(struct dm_io_request *io_req, unsigned num_regions,
444 struct io_region *where, unsigned long *sync_error_bits)
388{ 445{
446 int r;
389 struct dpages dp; 447 struct dpages dp;
390 vm_dp_init(&dp, data);
391 return sync_io(num_regions, where, rw, &dp, error_bits);
392}
393 448
394int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 449 r = dp_init(io_req, &dp);
395 struct page_list *pl, unsigned int offset, 450 if (r)
396 io_notify_fn fn, void *context) 451 return r;
397{
398 struct dpages dp;
399 list_dp_init(&dp, pl, offset);
400 return async_io(num_regions, where, rw, &dp, fn, context);
401}
402 452
403int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, 453 if (!io_req->notify.fn)
404 struct bio_vec *bvec, io_notify_fn fn, void *context) 454 return sync_io(io_req->client, num_regions, where,
405{ 455 io_req->bi_rw, &dp, sync_error_bits);
406 struct dpages dp;
407 bvec_dp_init(&dp, bvec);
408 return async_io(num_regions, where, rw, &dp, fn, context);
409}
410 456
411int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, 457 return async_io(io_req->client, num_regions, where, io_req->bi_rw,
412 void *data, io_notify_fn fn, void *context) 458 &dp, io_req->notify.fn, io_req->notify.context);
413{
414 struct dpages dp;
415 vm_dp_init(&dp, data);
416 return async_io(num_regions, where, rw, &dp, fn, context);
417} 459}
418 460EXPORT_SYMBOL(dm_io);
419EXPORT_SYMBOL(dm_io_get);
420EXPORT_SYMBOL(dm_io_put);
421EXPORT_SYMBOL(dm_io_sync);
422EXPORT_SYMBOL(dm_io_async);
423EXPORT_SYMBOL(dm_io_sync_bvec);
424EXPORT_SYMBOL(dm_io_async_bvec);
425EXPORT_SYMBOL(dm_io_sync_vm);
426EXPORT_SYMBOL(dm_io_async_vm);
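
The eight exported entry points above collapse into dm_io() plus a client handle; synchronous versus asynchronous behaviour is selected purely by notify.fn. A minimal synchronous read into a vmalloc'd buffer, assuming buf, bdev and client already exist (a sketch of a caller, not code from this patch):

    struct io_region where = {
            .bdev   = bdev,
            .sector = 0,
            .count  = 8,                     /* sectors; 0 would skip the region */
    };
    struct dm_io_request io_req = {
            .bi_rw       = READ,
            .mem.type    = DM_IO_VMA,
            .mem.ptr.vma = buf,
            .notify.fn   = NULL,             /* NULL selects synchronous io */
            .client      = client,
    };
    unsigned long error_bits;
    int r = dm_io(&io_req, 1, &where, &error_bits);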
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9f..f647e2cceaa6 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
12struct io_region { 12struct io_region {
13 struct block_device *bdev; 13 struct block_device *bdev;
14 sector_t sector; 14 sector_t sector;
15 sector_t count; 15 sector_t count; /* If this is zero the region is ignored. */
16}; 16};
17 17
18struct page_list { 18struct page_list {
@@ -20,55 +20,60 @@ struct page_list {
20 struct page *page; 20 struct page *page;
21}; 21};
22 22
23
24/*
25 * 'error' is a bitset, with each bit indicating whether an error
26 * occurred doing io to the corresponding region.
27 */
28typedef void (*io_notify_fn)(unsigned long error, void *context); 23typedef void (*io_notify_fn)(unsigned long error, void *context);
29 24
25enum dm_io_mem_type {
26 DM_IO_PAGE_LIST,/* Page list */
27 DM_IO_BVEC, /* Bio vector */
28 DM_IO_VMA, /* Virtual memory area */
29 DM_IO_KMEM, /* Kernel memory */
30};
31
32struct dm_io_memory {
33 enum dm_io_mem_type type;
34
35 union {
36 struct page_list *pl;
37 struct bio_vec *bvec;
38 void *vma;
39 void *addr;
40 } ptr;
41
42 unsigned offset;
43};
44
45struct dm_io_notify {
46 io_notify_fn fn; /* Callback for asynchronous requests */
47 void *context; /* Passed to callback */
48};
30 49
31/* 50/*
32 * Before anyone uses the IO interface they should call 51 * IO request structure
33 * dm_io_get(), specifying roughly how many pages they are
34 * expecting to perform io on concurrently.
35 *
36 * This function may block.
37 */ 52 */
38int dm_io_get(unsigned int num_pages); 53struct dm_io_client;
39void dm_io_put(unsigned int num_pages); 54struct dm_io_request {
55 int bi_rw; /* READ|WRITE - not READA */
56 struct dm_io_memory mem; /* Memory to use for io */
57 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
58 struct dm_io_client *client; /* Client memory handler */
59};
40 60
41/* 61/*
42 * Synchronous IO. 62 * For async io calls, users can alternatively use the dm_io() function below
63 * and dm_io_client_create() to create private mempools for the client.
43 * 64 *
44 * Please ensure that the rw flag in the next two functions is 65 * Create/destroy may block.
45 * either READ or WRITE, ie. we don't take READA. Any
46 * regions with a zero count field will be ignored.
47 */ 66 */
48int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 67struct dm_io_client *dm_io_client_create(unsigned num_pages);
49 struct page_list *pl, unsigned int offset, 68int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
50 unsigned long *error_bits); 69void dm_io_client_destroy(struct dm_io_client *client);
51
52int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
53 struct bio_vec *bvec, unsigned long *error_bits);
54
55int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
56 void *data, unsigned long *error_bits);
57 70
58/* 71/*
59 * Aynchronous IO. 72 * IO interface using private per-client pools.
60 * 73 * Each bit in the optional 'sync_error_bits' bitset indicates whether an
61 * The 'where' array may be safely allocated on the stack since 74 * error occurred doing io to the corresponding region.
62 * the function takes a copy.
63 */ 75 */
64int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 76int dm_io(struct dm_io_request *io_req, unsigned num_regions,
65 struct page_list *pl, unsigned int offset, 77 struct io_region *region, unsigned long *sync_error_bits);
66 io_notify_fn fn, void *context);
67
68int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
69 struct bio_vec *bvec, io_notify_fn fn, void *context);
70
71int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
72 void *data, io_notify_fn fn, void *context);
73 78
74#endif 79#endif
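
Supplying notify.fn flips the same request to asynchronous completion; the callback receives the per-region error bitset documented above. A hedged sketch, reusing the request shape shown for dm-io.c (my_done and my_state are hypothetical caller names):

    static void my_done(unsigned long error, void *context)
    {
            /* each set bit marks a region whose io failed */
    }

    io_req.notify.fn      = my_done;
    io_req.notify.context = my_state;
    r = dm_io(&io_req, num_regions, where, NULL);  /* returns before completion */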
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6a9261351848..a66428d860fe 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -149,9 +149,12 @@ struct log_c {
149 FORCESYNC, /* Force a sync to happen */ 149 FORCESYNC, /* Force a sync to happen */
150 } sync; 150 } sync;
151 151
152 struct dm_io_request io_req;
153
152 /* 154 /*
153 * Disk log fields 155 * Disk log fields
154 */ 156 */
157 int log_dev_failed;
155 struct dm_dev *log_dev; 158 struct dm_dev *log_dev;
156 struct log_header header; 159 struct log_header header;
157 160
@@ -199,13 +202,20 @@ static void header_from_disk(struct log_header *core, struct log_header *disk)
199 core->nr_regions = le64_to_cpu(disk->nr_regions); 202 core->nr_regions = le64_to_cpu(disk->nr_regions);
200} 203}
201 204
205static int rw_header(struct log_c *lc, int rw)
206{
207 lc->io_req.bi_rw = rw;
208 lc->io_req.mem.ptr.vma = lc->disk_header;
209 lc->io_req.notify.fn = NULL;
210
211 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
212}
213
202static int read_header(struct log_c *log) 214static int read_header(struct log_c *log)
203{ 215{
204 int r; 216 int r;
205 unsigned long ebits;
206 217
207 r = dm_io_sync_vm(1, &log->header_location, READ, 218 r = rw_header(log, READ);
208 log->disk_header, &ebits);
209 if (r) 219 if (r)
210 return r; 220 return r;
211 221
@@ -233,11 +243,8 @@ static int read_header(struct log_c *log)
233 243
234static inline int write_header(struct log_c *log) 244static inline int write_header(struct log_c *log)
235{ 245{
236 unsigned long ebits;
237
238 header_to_disk(&log->header, log->disk_header); 246 header_to_disk(&log->header, log->disk_header);
239 return dm_io_sync_vm(1, &log->header_location, WRITE, 247 return rw_header(log, WRITE);
240 log->disk_header, &ebits);
241} 248}
242 249
243/*---------------------------------------------------------------- 250/*----------------------------------------------------------------
@@ -256,6 +263,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
256 uint32_t region_size; 263 uint32_t region_size;
257 unsigned int region_count; 264 unsigned int region_count;
258 size_t bitset_size, buf_size; 265 size_t bitset_size, buf_size;
266 int r;
259 267
260 if (argc < 1 || argc > 2) { 268 if (argc < 1 || argc > 2) {
261 DMWARN("wrong number of arguments to mirror log"); 269 DMWARN("wrong number of arguments to mirror log");
@@ -315,6 +323,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
315 lc->disk_header = NULL; 323 lc->disk_header = NULL;
316 } else { 324 } else {
317 lc->log_dev = dev; 325 lc->log_dev = dev;
326 lc->log_dev_failed = 0;
318 lc->header_location.bdev = lc->log_dev->bdev; 327 lc->header_location.bdev = lc->log_dev->bdev;
319 lc->header_location.sector = 0; 328 lc->header_location.sector = 0;
320 329
@@ -324,6 +333,15 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
324 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 333 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
325 bitset_size, ti->limits.hardsect_size); 334 bitset_size, ti->limits.hardsect_size);
326 lc->header_location.count = buf_size >> SECTOR_SHIFT; 335 lc->header_location.count = buf_size >> SECTOR_SHIFT;
336 lc->io_req.mem.type = DM_IO_VMA;
337 lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
338 PAGE_SIZE));
339 if (IS_ERR(lc->io_req.client)) {
340 r = PTR_ERR(lc->io_req.client);
341 DMWARN("couldn't allocate disk io client");
342 kfree(lc);
344 }
327 345
328 lc->disk_header = vmalloc(buf_size); 346 lc->disk_header = vmalloc(buf_size);
329 if (!lc->disk_header) { 347 if (!lc->disk_header) {
@@ -424,6 +442,7 @@ static void disk_dtr(struct dirty_log *log)
424 442
425 dm_put_device(lc->ti, lc->log_dev); 443 dm_put_device(lc->ti, lc->log_dev);
426 vfree(lc->disk_header); 444 vfree(lc->disk_header);
445 dm_io_client_destroy(lc->io_req.client);
427 destroy_log_context(lc); 446 destroy_log_context(lc);
428} 447}
429 448
@@ -437,6 +456,15 @@ static int count_bits32(uint32_t *addr, unsigned size)
437 return count; 456 return count;
438} 457}
439 458
459static void fail_log_device(struct log_c *lc)
460{
461 if (lc->log_dev_failed)
462 return;
463
464 lc->log_dev_failed = 1;
465 dm_table_event(lc->ti->table);
466}
467
440static int disk_resume(struct dirty_log *log) 468static int disk_resume(struct dirty_log *log)
441{ 469{
442 int r; 470 int r;
@@ -446,8 +474,19 @@ static int disk_resume(struct dirty_log *log)
446 474
447 /* read the disk header */ 475 /* read the disk header */
448 r = read_header(lc); 476 r = read_header(lc);
449 if (r) 477 if (r) {
450 return r; 478 DMWARN("%s: Failed to read header on mirror log device",
479 lc->log_dev->name);
480 fail_log_device(lc);
481 /*
482 * If the log device cannot be read, we must assume
483 * all regions are out-of-sync. If we simply return
484 * here, the state will be uninitialized and could
485 * lead us to return 'in-sync' status for regions
486 * that are actually 'out-of-sync'.
487 */
488 lc->header.nr_regions = 0;
489 }
451 490
452 /* set or clear any new bits -- device has grown */ 491 /* set or clear any new bits -- device has grown */
453 if (lc->sync == NOSYNC) 492 if (lc->sync == NOSYNC)
@@ -472,7 +511,14 @@ static int disk_resume(struct dirty_log *log)
472 lc->header.nr_regions = lc->region_count; 511 lc->header.nr_regions = lc->region_count;
473 512
474 /* write the new header */ 513 /* write the new header */
475 return write_header(lc); 514 r = write_header(lc);
515 if (r) {
516 DMWARN("%s: Failed to write header on mirror log device",
517 lc->log_dev->name);
518 fail_log_device(lc);
519 }
520
521 return r;
476} 522}
477 523
478static uint32_t core_get_region_size(struct dirty_log *log) 524static uint32_t core_get_region_size(struct dirty_log *log)
@@ -516,7 +562,9 @@ static int disk_flush(struct dirty_log *log)
516 return 0; 562 return 0;
517 563
518 r = write_header(lc); 564 r = write_header(lc);
519 if (!r) 565 if (r)
566 fail_log_device(lc);
567 else
520 lc->touched = 0; 568 lc->touched = 0;
521 569
522 return r; 570 return r;
@@ -591,6 +639,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
591 639
592 switch(status) { 640 switch(status) {
593 case STATUSTYPE_INFO: 641 case STATUSTYPE_INFO:
642 DMEMIT("1 %s", log->type->name);
594 break; 643 break;
595 644
596 case STATUSTYPE_TABLE: 645 case STATUSTYPE_TABLE:
@@ -606,17 +655,17 @@ static int disk_status(struct dirty_log *log, status_type_t status,
606 char *result, unsigned int maxlen) 655 char *result, unsigned int maxlen)
607{ 656{
608 int sz = 0; 657 int sz = 0;
609 char buffer[16];
610 struct log_c *lc = log->context; 658 struct log_c *lc = log->context;
611 659
612 switch(status) { 660 switch(status) {
613 case STATUSTYPE_INFO: 661 case STATUSTYPE_INFO:
662 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
663 lc->log_dev_failed ? 'D' : 'A');
614 break; 664 break;
615 665
616 case STATUSTYPE_TABLE: 666 case STATUSTYPE_TABLE:
617 format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
618 DMEMIT("%s %u %s %u ", log->type->name, 667 DMEMIT("%s %u %s %u ", log->type->name,
619 lc->sync == DEFAULTSYNC ? 2 : 3, buffer, 668 lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
620 lc->region_size); 669 lc->region_size);
621 DMEMIT_SYNC; 670 DMEMIT_SYNC;
622 } 671 }
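
With these DMEMIT changes a log finally reports something under STATUSTYPE_INFO: "1 core" for a core log, and "3 <type> <dev> <A|D>" for a disk log, where the trailing character flips to 'D' once fail_log_device() has run. Illustrative fragments (the device name is invented):

    3 disk 253:4 A      # healthy log device
    3 disk 253:4 D      # after a failed header read or write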
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3aa013506967..de54b39e6ffe 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -668,6 +668,9 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
668 return -EINVAL; 668 return -EINVAL;
669 } 669 }
670 670
671 m->hw_handler.md = dm_table_get_md(ti->table);
672 dm_put(m->hw_handler.md);
673
671 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); 674 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
672 if (r) { 675 if (r) {
673 dm_put_hw_handler(hwht); 676 dm_put_hw_handler(hwht);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 23a642619bed..ef124b71ccc8 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -21,15 +21,11 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22 22
23#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24#define DM_IO_PAGES 64
24 25
25static struct workqueue_struct *_kmirrord_wq; 26#define DM_RAID1_HANDLE_ERRORS 0x01
26static struct work_struct _kmirrord_work;
27static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
28 27
29static inline void wake(void) 28static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30{
31 queue_work(_kmirrord_wq, &_kmirrord_work);
32}
33 29
34/*----------------------------------------------------------------- 30/*-----------------------------------------------------------------
35 * Region hash 31 * Region hash
@@ -125,17 +121,23 @@ struct mirror_set {
125 struct list_head list; 121 struct list_head list;
126 struct region_hash rh; 122 struct region_hash rh;
127 struct kcopyd_client *kcopyd_client; 123 struct kcopyd_client *kcopyd_client;
124 uint64_t features;
128 125
129 spinlock_t lock; /* protects the next two lists */ 126 spinlock_t lock; /* protects the next two lists */
130 struct bio_list reads; 127 struct bio_list reads;
131 struct bio_list writes; 128 struct bio_list writes;
132 129
130 struct dm_io_client *io_client;
131
133 /* recovery */ 132 /* recovery */
134 region_t nr_regions; 133 region_t nr_regions;
135 int in_sync; 134 int in_sync;
136 135
137 struct mirror *default_mirror; /* Default mirror */ 136 struct mirror *default_mirror; /* Default mirror */
138 137
138 struct workqueue_struct *kmirrord_wq;
139 struct work_struct kmirrord_work;
140
139 unsigned int nr_mirrors; 141 unsigned int nr_mirrors;
140 struct mirror mirror[0]; 142 struct mirror mirror[0];
141}; 143};
@@ -153,6 +155,11 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
153 return region << rh->region_shift; 155 return region << rh->region_shift;
154} 156}
155 157
158static void wake(struct mirror_set *ms)
159{
160 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
161}
162
156/* FIXME move this */ 163/* FIXME move this */
157static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 164static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
158 165
@@ -398,8 +405,7 @@ static void rh_update_states(struct region_hash *rh)
398 mempool_free(reg, rh->region_pool); 405 mempool_free(reg, rh->region_pool);
399 } 406 }
400 407
401 if (!list_empty(&recovered)) 408 rh->log->type->flush(rh->log);
402 rh->log->type->flush(rh->log);
403 409
404 list_for_each_entry_safe (reg, next, &clean, list) 410 list_for_each_entry_safe (reg, next, &clean, list)
405 mempool_free(reg, rh->region_pool); 411 mempool_free(reg, rh->region_pool);
@@ -471,7 +477,7 @@ static void rh_dec(struct region_hash *rh, region_t region)
471 spin_unlock_irqrestore(&rh->region_lock, flags); 477 spin_unlock_irqrestore(&rh->region_lock, flags);
472 478
473 if (should_wake) 479 if (should_wake)
474 wake(); 480 wake(rh->ms);
475} 481}
476 482
477/* 483/*
@@ -558,7 +564,7 @@ static void rh_recovery_end(struct region *reg, int success)
558 list_add(&reg->list, &reg->rh->recovered_regions); 564 list_add(&reg->list, &reg->rh->recovered_regions);
559 spin_unlock_irq(&rh->region_lock); 565 spin_unlock_irq(&rh->region_lock);
560 566
561 wake(); 567 wake(rh->ms);
562} 568}
563 569
564static void rh_flush(struct region_hash *rh) 570static void rh_flush(struct region_hash *rh)
@@ -592,7 +598,7 @@ static void rh_start_recovery(struct region_hash *rh)
592 for (i = 0; i < MAX_RECOVERY; i++) 598 for (i = 0; i < MAX_RECOVERY; i++)
593 up(&rh->recovery_count); 599 up(&rh->recovery_count);
594 600
595 wake(); 601 wake(rh->ms);
596} 602}
597 603
598/* 604/*
@@ -735,7 +741,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
735 /* 741 /*
736 * We can only read balance if the region is in sync. 742 * We can only read balance if the region is in sync.
737 */ 743 */
738 if (rh_in_sync(&ms->rh, region, 0)) 744 if (rh_in_sync(&ms->rh, region, 1))
739 m = choose_mirror(ms, bio->bi_sector); 745 m = choose_mirror(ms, bio->bi_sector);
740 else 746 else
741 m = ms->default_mirror; 747 m = ms->default_mirror;
@@ -792,6 +798,14 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
792 unsigned int i; 798 unsigned int i;
793 struct io_region io[KCOPYD_MAX_REGIONS+1]; 799 struct io_region io[KCOPYD_MAX_REGIONS+1];
794 struct mirror *m; 800 struct mirror *m;
801 struct dm_io_request io_req = {
802 .bi_rw = WRITE,
803 .mem.type = DM_IO_BVEC,
804 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
805 .notify.fn = write_callback,
806 .notify.context = bio,
807 .client = ms->io_client,
808 };
795 809
796 for (i = 0; i < ms->nr_mirrors; i++) { 810 for (i = 0; i < ms->nr_mirrors; i++) {
797 m = ms->mirror + i; 811 m = ms->mirror + i;
@@ -802,9 +816,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
802 } 816 }
803 817
804 bio_set_ms(bio, ms); 818 bio_set_ms(bio, ms);
805 dm_io_async_bvec(ms->nr_mirrors, io, WRITE, 819
806 bio->bi_io_vec + bio->bi_idx, 820 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
807 write_callback, bio);
808} 821}
809 822
810static void do_writes(struct mirror_set *ms, struct bio_list *writes) 823static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -870,11 +883,10 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
870/*----------------------------------------------------------------- 883/*-----------------------------------------------------------------
871 * kmirrord 884 * kmirrord
872 *---------------------------------------------------------------*/ 885 *---------------------------------------------------------------*/
873static LIST_HEAD(_mirror_sets); 886static void do_mirror(struct work_struct *work)
874static DECLARE_RWSEM(_mirror_sets_lock);
875
876static void do_mirror(struct mirror_set *ms)
877{ 887{
 888 struct mirror_set *ms = container_of(work, struct mirror_set,
889 kmirrord_work);
878 struct bio_list reads, writes; 890 struct bio_list reads, writes;
879 891
880 spin_lock(&ms->lock); 892 spin_lock(&ms->lock);
@@ -890,16 +902,6 @@ static void do_mirror(struct mirror_set *ms)
890 do_writes(ms, &writes); 902 do_writes(ms, &writes);
891} 903}
892 904
893static void do_work(struct work_struct *ignored)
894{
895 struct mirror_set *ms;
896
897 down_read(&_mirror_sets_lock);
898 list_for_each_entry (ms, &_mirror_sets, list)
899 do_mirror(ms);
900 up_read(&_mirror_sets_lock);
901}
902
903/*----------------------------------------------------------------- 905/*-----------------------------------------------------------------
904 * Target functions 906 * Target functions
905 *---------------------------------------------------------------*/ 907 *---------------------------------------------------------------*/
@@ -931,6 +933,13 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
931 ms->in_sync = 0; 933 ms->in_sync = 0;
932 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; 934 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
933 935
936 ms->io_client = dm_io_client_create(DM_IO_PAGES);
937 if (IS_ERR(ms->io_client)) {
938 ti->error = "Error creating dm_io client";
939 kfree(ms);
940 return NULL;
941 }
942
934 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 943 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
935 ti->error = "Error creating dirty region hash"; 944 ti->error = "Error creating dirty region hash";
936 kfree(ms); 945 kfree(ms);
@@ -946,6 +955,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
946 while (m--) 955 while (m--)
947 dm_put_device(ti, ms->mirror[m].dev); 956 dm_put_device(ti, ms->mirror[m].dev);
948 957
958 dm_io_client_destroy(ms->io_client);
949 rh_exit(&ms->rh); 959 rh_exit(&ms->rh);
950 kfree(ms); 960 kfree(ms);
951} 961}
@@ -978,23 +988,6 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
978 return 0; 988 return 0;
979} 989}
980 990
981static int add_mirror_set(struct mirror_set *ms)
982{
983 down_write(&_mirror_sets_lock);
984 list_add_tail(&ms->list, &_mirror_sets);
985 up_write(&_mirror_sets_lock);
986 wake();
987
988 return 0;
989}
990
991static void del_mirror_set(struct mirror_set *ms)
992{
993 down_write(&_mirror_sets_lock);
994 list_del(&ms->list);
995 up_write(&_mirror_sets_lock);
996}
997
998/* 991/*
999 * Create dirty log: log_type #log_params <log_params> 992 * Create dirty log: log_type #log_params <log_params>
1000 */ 993 */
@@ -1037,16 +1030,55 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1037 return dl; 1030 return dl;
1038} 1031}
1039 1032
1033static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1034 unsigned *args_used)
1035{
1036 unsigned num_features;
1037 struct dm_target *ti = ms->ti;
1038
1039 *args_used = 0;
1040
1041 if (!argc)
1042 return 0;
1043
1044 if (sscanf(argv[0], "%u", &num_features) != 1) {
1045 ti->error = "Invalid number of features";
1046 return -EINVAL;
1047 }
1048
1049 argc--;
1050 argv++;
1051 (*args_used)++;
1052
1053 if (num_features > argc) {
1054 ti->error = "Not enough arguments to support feature count";
1055 return -EINVAL;
1056 }
1057
1058 if (!strcmp("handle_errors", argv[0]))
1059 ms->features |= DM_RAID1_HANDLE_ERRORS;
1060 else {
1061 ti->error = "Unrecognised feature requested";
1062 return -EINVAL;
1063 }
1064
1065 (*args_used)++;
1066
1067 return 0;
1068}
1069
1040/* 1070/*
1041 * Construct a mirror mapping: 1071 * Construct a mirror mapping:
1042 * 1072 *
1043 * log_type #log_params <log_params> 1073 * log_type #log_params <log_params>
1044 * #mirrors [mirror_path offset]{2,} 1074 * #mirrors [mirror_path offset]{2,}
1075 * [#features <features>]
1045 * 1076 *
1046 * log_type is "core" or "disk" 1077 * log_type is "core" or "disk"
1047 * #log_params is between 1 and 3 1078 * #log_params is between 1 and 3
1079 *
1080 * If present, features must be "handle_errors".
1048 */ 1081 */
1049#define DM_IO_PAGES 64
1050static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1082static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1051{ 1083{
1052 int r; 1084 int r;
@@ -1070,8 +1102,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1070 1102
1071 argv++, argc--; 1103 argv++, argc--;
1072 1104
1073 if (argc != nr_mirrors * 2) { 1105 if (argc < nr_mirrors * 2) {
1074 ti->error = "Wrong number of mirror arguments"; 1106 ti->error = "Too few mirror arguments";
1075 dm_destroy_dirty_log(dl); 1107 dm_destroy_dirty_log(dl);
1076 return -EINVAL; 1108 return -EINVAL;
1077 } 1109 }
@@ -1096,13 +1128,37 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1096 ti->private = ms; 1128 ti->private = ms;
1097 ti->split_io = ms->rh.region_size; 1129 ti->split_io = ms->rh.region_size;
1098 1130
1131 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1132 if (!ms->kmirrord_wq) {
1133 DMERR("couldn't start kmirrord");
1134 free_context(ms, ti, m);
1135 return -ENOMEM;
1136 }
1137 INIT_WORK(&ms->kmirrord_work, do_mirror);
1138
1139 r = parse_features(ms, argc, argv, &args_used);
1140 if (r) {
1141 free_context(ms, ti, ms->nr_mirrors);
1142 return r;
1143 }
1144
1145 argv += args_used;
1146 argc -= args_used;
1147
1148 if (argc) {
1149 ti->error = "Too many mirror arguments";
1150 free_context(ms, ti, ms->nr_mirrors);
1151 return -EINVAL;
1152 }
1153
1099 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1154 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1100 if (r) { 1155 if (r) {
1156 destroy_workqueue(ms->kmirrord_wq);
1101 free_context(ms, ti, ms->nr_mirrors); 1157 free_context(ms, ti, ms->nr_mirrors);
1102 return r; 1158 return r;
1103 } 1159 }
1104 1160
1105 add_mirror_set(ms); 1161 wake(ms);
1106 return 0; 1162 return 0;
1107} 1163}
1108 1164
@@ -1110,8 +1166,9 @@ static void mirror_dtr(struct dm_target *ti)
1110{ 1166{
1111 struct mirror_set *ms = (struct mirror_set *) ti->private; 1167 struct mirror_set *ms = (struct mirror_set *) ti->private;
1112 1168
1113 del_mirror_set(ms); 1169 flush_workqueue(ms->kmirrord_wq);
1114 kcopyd_client_destroy(ms->kcopyd_client); 1170 kcopyd_client_destroy(ms->kcopyd_client);
1171 destroy_workqueue(ms->kmirrord_wq);
1115 free_context(ms, ti, ms->nr_mirrors); 1172 free_context(ms, ti, ms->nr_mirrors);
1116} 1173}
1117 1174
@@ -1127,7 +1184,7 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1127 spin_unlock(&ms->lock); 1184 spin_unlock(&ms->lock);
1128 1185
1129 if (should_wake) 1186 if (should_wake)
1130 wake(); 1187 wake(ms);
1131} 1188}
1132 1189
1133/* 1190/*
@@ -1222,11 +1279,9 @@ static void mirror_resume(struct dm_target *ti)
1222static int mirror_status(struct dm_target *ti, status_type_t type, 1279static int mirror_status(struct dm_target *ti, status_type_t type,
1223 char *result, unsigned int maxlen) 1280 char *result, unsigned int maxlen)
1224{ 1281{
1225 unsigned int m, sz; 1282 unsigned int m, sz = 0;
1226 struct mirror_set *ms = (struct mirror_set *) ti->private; 1283 struct mirror_set *ms = (struct mirror_set *) ti->private;
1227 1284
1228 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1229
1230 switch (type) { 1285 switch (type) {
1231 case STATUSTYPE_INFO: 1286 case STATUSTYPE_INFO:
1232 DMEMIT("%d ", ms->nr_mirrors); 1287 DMEMIT("%d ", ms->nr_mirrors);
@@ -1237,13 +1292,21 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1237 (unsigned long long)ms->rh.log->type-> 1292 (unsigned long long)ms->rh.log->type->
1238 get_sync_count(ms->rh.log), 1293 get_sync_count(ms->rh.log),
1239 (unsigned long long)ms->nr_regions); 1294 (unsigned long long)ms->nr_regions);
1295
1296 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1297
1240 break; 1298 break;
1241 1299
1242 case STATUSTYPE_TABLE: 1300 case STATUSTYPE_TABLE:
1301 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1302
1243 DMEMIT("%d", ms->nr_mirrors); 1303 DMEMIT("%d", ms->nr_mirrors);
1244 for (m = 0; m < ms->nr_mirrors; m++) 1304 for (m = 0; m < ms->nr_mirrors; m++)
1245 DMEMIT(" %s %llu", ms->mirror[m].dev->name, 1305 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1246 (unsigned long long)ms->mirror[m].offset); 1306 (unsigned long long)ms->mirror[m].offset);
1307
1308 if (ms->features & DM_RAID1_HANDLE_ERRORS)
1309 DMEMIT(" 1 handle_errors");
1247 } 1310 }
1248 1311
1249 return 0; 1312 return 0;
@@ -1251,7 +1314,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1251 1314
1252static struct target_type mirror_target = { 1315static struct target_type mirror_target = {
1253 .name = "mirror", 1316 .name = "mirror",
1254 .version = {1, 0, 2}, 1317 .version = {1, 0, 3},
1255 .module = THIS_MODULE, 1318 .module = THIS_MODULE,
1256 .ctr = mirror_ctr, 1319 .ctr = mirror_ctr,
1257 .dtr = mirror_dtr, 1320 .dtr = mirror_dtr,
@@ -1270,20 +1333,11 @@ static int __init dm_mirror_init(void)
1270 if (r) 1333 if (r)
1271 return r; 1334 return r;
1272 1335
1273 _kmirrord_wq = create_singlethread_workqueue("kmirrord");
1274 if (!_kmirrord_wq) {
1275 DMERR("couldn't start kmirrord");
1276 dm_dirty_log_exit();
1277 return r;
1278 }
1279 INIT_WORK(&_kmirrord_work, do_work);
1280
1281 r = dm_register_target(&mirror_target); 1336 r = dm_register_target(&mirror_target);
1282 if (r < 0) { 1337 if (r < 0) {
1283 DMERR("%s: Failed to register mirror target", 1338 DMERR("%s: Failed to register mirror target",
1284 mirror_target.name); 1339 mirror_target.name);
1285 dm_dirty_log_exit(); 1340 dm_dirty_log_exit();
1286 destroy_workqueue(_kmirrord_wq);
1287 } 1341 }
1288 1342
1289 return r; 1343 return r;
@@ -1297,7 +1351,6 @@ static void __exit dm_mirror_exit(void)
1297 if (r < 0) 1351 if (r < 0)
1298 DMERR("%s: unregister failed %d", mirror_target.name, r); 1352 DMERR("%s: unregister failed %d", mirror_target.name, r);
1299 1353
1300 destroy_workqueue(_kmirrord_wq);
1301 dm_dirty_log_exit(); 1354 dm_dirty_log_exit();
1302} 1355}
1303 1356
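
parse_features() above accepts exactly one optional feature today. An illustrative mirror table exercising it (devices, sizes and region size are hypothetical):

    # log_type #log_params <params>  #mirrors dev off dev off  #features feature
    0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors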
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 05befa91807a..2fc199b0016b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
425} 425}
426 426
427/* 427/*
428 * If possible (ie. blk_size[major] is set), this checks an area 428 * If possible, this checks an area of a destination device is valid.
429 * of a destination device is valid.
430 */ 429 */
431static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 430static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
432{ 431{
433 sector_t dev_size; 432 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
434 dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 433
434 if (!dev_size)
435 return 1;
436
435 return ((start < dev_size) && (len <= (dev_size - start))); 437 return ((start < dev_size) && (len <= (dev_size - start)));
436} 438}
437 439
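
As a worked example of the revised check: a 1 GiB destination has i_size = 1073741824, so dev_size = 1073741824 >> 9 = 2097152 sectors; a target area starting at sector 2097000 with len 200 is rejected because 200 > 2097152 - 2097000 = 152, while a device whose size is not yet known (dev_size == 0) now passes unconditionally instead of failing the comparison.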
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 11a98df298ec..2717a355dc5b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1236,6 +1236,7 @@ void dm_put(struct mapped_device *md)
1236 free_dev(md); 1236 free_dev(md);
1237 } 1237 }
1238} 1238}
1239EXPORT_SYMBOL_GPL(dm_put);
1239 1240
1240/* 1241/*
1241 * Process the deferred bios 1242 * Process the deferred bios
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index b46f6c575f7e..dbc234e3c69f 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 * 6 *
@@ -45,6 +46,8 @@ struct kcopyd_client {
45 unsigned int nr_pages; 46 unsigned int nr_pages;
46 unsigned int nr_free_pages; 47 unsigned int nr_free_pages;
47 48
49 struct dm_io_client *io_client;
50
48 wait_queue_head_t destroyq; 51 wait_queue_head_t destroyq;
49 atomic_t nr_jobs; 52 atomic_t nr_jobs;
50}; 53};
@@ -342,16 +345,20 @@ static void complete_io(unsigned long error, void *context)
342static int run_io_job(struct kcopyd_job *job) 345static int run_io_job(struct kcopyd_job *job)
343{ 346{
344 int r; 347 int r;
348 struct dm_io_request io_req = {
349 .bi_rw = job->rw,
350 .mem.type = DM_IO_PAGE_LIST,
351 .mem.ptr.pl = job->pages,
352 .mem.offset = job->offset,
353 .notify.fn = complete_io,
354 .notify.context = job,
355 .client = job->kc->io_client,
356 };
345 357
346 if (job->rw == READ) 358 if (job->rw == READ)
347 r = dm_io_async(1, &job->source, job->rw, 359 r = dm_io(&io_req, 1, &job->source, NULL);
348 job->pages,
349 job->offset, complete_io, job);
350
351 else 360 else
352 r = dm_io_async(job->num_dests, job->dests, job->rw, 361 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
353 job->pages,
354 job->offset, complete_io, job);
355 362
356 return r; 363 return r;
357} 364}
@@ -670,8 +677,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
670 return r; 677 return r;
671 } 678 }
672 679
673 r = dm_io_get(nr_pages); 680 kc->io_client = dm_io_client_create(nr_pages);
674 if (r) { 681 if (IS_ERR(kc->io_client)) {
682 r = PTR_ERR(kc->io_client);
675 client_free_pages(kc); 683 client_free_pages(kc);
676 kfree(kc); 684 kfree(kc);
677 kcopyd_exit(); 685 kcopyd_exit();
@@ -691,7 +699,7 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
691 /* Wait for completion of all jobs submitted by this client. */ 699 /* Wait for completion of all jobs submitted by this client. */
692 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); 700 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
693 701
694 dm_io_put(kc->nr_pages); 702 dm_io_client_destroy(kc->io_client);
695 client_free_pages(kc); 703 client_free_pages(kc);
696 client_del(kc); 704 client_del(kc);
697 kfree(kc); 705 kfree(kc);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2b4315d7e5d6..2901d0c0ee9e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -33,6 +33,7 @@
33*/ 33*/
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/kernel.h>
36#include <linux/kthread.h> 37#include <linux/kthread.h>
37#include <linux/linkage.h> 38#include <linux/linkage.h>
38#include <linux/raid/md.h> 39#include <linux/raid/md.h>
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
273 atomic_set(&new->active, 1); 274 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock); 275 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait); 276 init_waitqueue_head(&new->sb_wait);
277 new->reshape_position = MaxSector;
276 278
277 new->queue = blk_alloc_queue(GFP_KERNEL); 279 new->queue = blk_alloc_queue(GFP_KERNEL);
278 if (!new->queue) { 280 if (!new->queue) {
@@ -589,14 +591,41 @@ abort:
589 return ret; 591 return ret;
590} 592}
591 593
594
595static u32 md_csum_fold(u32 csum)
596{
597 csum = (csum & 0xffff) + (csum >> 16);
598 return (csum & 0xffff) + (csum >> 16);
599}
600
592static unsigned int calc_sb_csum(mdp_super_t * sb) 601static unsigned int calc_sb_csum(mdp_super_t * sb)
593{ 602{
603 u64 newcsum = 0;
604 u32 *sb32 = (u32*)sb;
605 int i;
594 unsigned int disk_csum, csum; 606 unsigned int disk_csum, csum;
595 607
596 disk_csum = sb->sb_csum; 608 disk_csum = sb->sb_csum;
597 sb->sb_csum = 0; 609 sb->sb_csum = 0;
598 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 610
611 for (i = 0; i < MD_SB_BYTES/4 ; i++)
612 newcsum += sb32[i];
613 csum = (newcsum & 0xffffffff) + (newcsum>>32);
614
615
616#ifdef CONFIG_ALPHA
617 /* This used to use csum_partial, which was wrong for several
618 * reasons including that different results are returned on
619 * different architectures. It isn't critical that we get exactly
620 * the same return value as before (we always csum_fold before
621 * testing, and that removes any differences). However as we
622 * know that csum_partial always returned a 16bit value on
623 * alphas, do a fold to maximise conformity to previous behaviour.
624 */
625 sb->sb_csum = md_csum_fold(disk_csum);
626#else
599 sb->sb_csum = disk_csum; 627 sb->sb_csum = disk_csum;
628#endif
600 return csum; 629 return csum;
601} 630}
602 631
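
md_csum_fold() is an end-around-carry reduction to 16 bits: 0x1fffe folds to 0xfffe + 0x1 = 0xffff, and 0xffff8000 folds to 0x8000 + 0xffff = 0x17fff, whose second fold gives 0x7fff + 0x1 = 0x8000; that carry-out case is why the function folds twice. Since super_90_load() now folds both sides of the comparison, the Alpha-only fold of the stored checksum affects what gets written, not whether previously written superblocks still verify.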
@@ -684,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
684 if (sb->raid_disks <= 0) 713 if (sb->raid_disks <= 0)
685 goto abort; 714 goto abort;
686 715
687 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 716 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
688 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 717 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689 b); 718 b);
690 goto abort; 719 goto abort;
@@ -694,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
694 rdev->data_offset = 0; 723 rdev->data_offset = 0;
695 rdev->sb_size = MD_SB_BYTES; 724 rdev->sb_size = MD_SB_BYTES;
696 725
726 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
727 if (sb->level != 1 && sb->level != 4
728 && sb->level != 5 && sb->level != 6
729 && sb->level != 10) {
730 /* FIXME use a better test */
731 printk(KERN_WARNING
732 "md: bitmaps not supported for this level.\n");
733 goto abort;
734 }
735 }
736
697 if (sb->level == LEVEL_MULTIPATH) 737 if (sb->level == LEVEL_MULTIPATH)
698 rdev->desc_nr = -1; 738 rdev->desc_nr = -1;
699 else 739 else
@@ -792,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
792 mddev->max_disks = MD_SB_DISKS; 832 mddev->max_disks = MD_SB_DISKS;
793 833
794 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 834 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795 mddev->bitmap_file == NULL) { 835 mddev->bitmap_file == NULL)
796 if (mddev->level != 1 && mddev->level != 4
797 && mddev->level != 5 && mddev->level != 6
798 && mddev->level != 10) {
799 /* FIXME use a better test */
800 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801 return -EINVAL;
802 }
803 mddev->bitmap_offset = mddev->default_bitmap_offset; 836 mddev->bitmap_offset = mddev->default_bitmap_offset;
804 }
805 837
806 } else if (mddev->pers == NULL) { 838 } else if (mddev->pers == NULL) {
807 /* Insist on good event counter while assembling */ 839 /* Insist on good event counter while assembling */
@@ -1058,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 bdevname(rdev->bdev,b)); 1090 bdevname(rdev->bdev,b));
1059 return -EINVAL; 1091 return -EINVAL;
1060 } 1092 }
1093 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1094 if (sb->level != cpu_to_le32(1) &&
1095 sb->level != cpu_to_le32(4) &&
1096 sb->level != cpu_to_le32(5) &&
1097 sb->level != cpu_to_le32(6) &&
1098 sb->level != cpu_to_le32(10)) {
1099 printk(KERN_WARNING
1100 "md: bitmaps not supported for this level.\n");
1101 return -EINVAL;
1102 }
1103 }
1104
1061 rdev->preferred_minor = 0xffff; 1105 rdev->preferred_minor = 0xffff;
1062 rdev->data_offset = le64_to_cpu(sb->data_offset); 1106 rdev->data_offset = le64_to_cpu(sb->data_offset);
1063 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1107 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1141 mddev->max_disks = (4096-256)/2; 1185 mddev->max_disks = (4096-256)/2;
1142 1186
1143 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1187 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144 mddev->bitmap_file == NULL ) { 1188 mddev->bitmap_file == NULL )
1145 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146 && mddev->level != 10) {
1147 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1148 return -EINVAL;
1149 }
1150 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1189 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1151 } 1190
1152 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1191 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1192 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1193 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -2204,6 +2243,10 @@ static ssize_t
2204layout_show(mddev_t *mddev, char *page) 2243layout_show(mddev_t *mddev, char *page)
2205{ 2244{
2206 /* just a number, not meaningful for all levels */ 2245 /* just a number, not meaningful for all levels */
2246 if (mddev->reshape_position != MaxSector &&
2247 mddev->layout != mddev->new_layout)
2248 return sprintf(page, "%d (%d)\n",
2249 mddev->new_layout, mddev->layout);
2207 return sprintf(page, "%d\n", mddev->layout); 2250 return sprintf(page, "%d\n", mddev->layout);
2208} 2251}
2209 2252
@@ -2212,13 +2255,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
2212{ 2255{
2213 char *e; 2256 char *e;
2214 unsigned long n = simple_strtoul(buf, &e, 10); 2257 unsigned long n = simple_strtoul(buf, &e, 10);
2215 if (mddev->pers)
2216 return -EBUSY;
2217 2258
2218 if (!*buf || (*e && *e != '\n')) 2259 if (!*buf || (*e && *e != '\n'))
2219 return -EINVAL; 2260 return -EINVAL;
2220 2261
2221 mddev->layout = n; 2262 if (mddev->pers)
2263 return -EBUSY;
2264 if (mddev->reshape_position != MaxSector)
2265 mddev->new_layout = n;
2266 else
2267 mddev->layout = n;
2222 return len; 2268 return len;
2223} 2269}
2224static struct md_sysfs_entry md_layout = 2270static struct md_sysfs_entry md_layout =
@@ -2230,6 +2276,10 @@ raid_disks_show(mddev_t *mddev, char *page)
2230{ 2276{
2231 if (mddev->raid_disks == 0) 2277 if (mddev->raid_disks == 0)
2232 return 0; 2278 return 0;
2279 if (mddev->reshape_position != MaxSector &&
2280 mddev->delta_disks != 0)
2281 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2282 mddev->raid_disks - mddev->delta_disks);
2233 return sprintf(page, "%d\n", mddev->raid_disks); 2283 return sprintf(page, "%d\n", mddev->raid_disks);
2234} 2284}
2235 2285
@@ -2247,7 +2297,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2247 2297
2248 if (mddev->pers) 2298 if (mddev->pers)
2249 rv = update_raid_disks(mddev, n); 2299 rv = update_raid_disks(mddev, n);
2250 else 2300 else if (mddev->reshape_position != MaxSector) {
2301 int olddisks = mddev->raid_disks - mddev->delta_disks;
2302 mddev->delta_disks = n - olddisks;
2303 mddev->raid_disks = n;
2304 } else
2251 mddev->raid_disks = n; 2305 mddev->raid_disks = n;
2252 return rv ? rv : len; 2306 return rv ? rv : len;
2253} 2307}
@@ -2257,6 +2311,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2257static ssize_t 2311static ssize_t
2258chunk_size_show(mddev_t *mddev, char *page) 2312chunk_size_show(mddev_t *mddev, char *page)
2259{ 2313{
2314 if (mddev->reshape_position != MaxSector &&
2315 mddev->chunk_size != mddev->new_chunk)
2316 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2317 mddev->chunk_size);
2260 return sprintf(page, "%d\n", mddev->chunk_size); 2318 return sprintf(page, "%d\n", mddev->chunk_size);
2261} 2319}
2262 2320
@@ -2267,12 +2325,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2267 char *e; 2325 char *e;
2268 unsigned long n = simple_strtoul(buf, &e, 10); 2326 unsigned long n = simple_strtoul(buf, &e, 10);
2269 2327
2270 if (mddev->pers)
2271 return -EBUSY;
2272 if (!*buf || (*e && *e != '\n')) 2328 if (!*buf || (*e && *e != '\n'))
2273 return -EINVAL; 2329 return -EINVAL;
2274 2330
2275 mddev->chunk_size = n; 2331 if (mddev->pers)
2332 return -EBUSY;
2333 else if (mddev->reshape_position != MaxSector)
2334 mddev->new_chunk = n;
2335 else
2336 mddev->chunk_size = n;
2276 return len; 2337 return len;
2277} 2338}
2278static struct md_sysfs_entry md_chunk_size = 2339static struct md_sysfs_entry md_chunk_size =
@@ -2637,8 +2698,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
2637 minor = simple_strtoul(buf, &e, 10); 2698 minor = simple_strtoul(buf, &e, 10);
2638 if (e==buf || (*e && *e != '\n') ) 2699 if (e==buf || (*e && *e != '\n') )
2639 return -EINVAL; 2700 return -EINVAL;
2640 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2701 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2641 super_types[major].name == NULL)
2642 return -ENOENT; 2702 return -ENOENT;
2643 mddev->major_version = major; 2703 mddev->major_version = major;
2644 mddev->minor_version = minor; 2704 mddev->minor_version = minor;
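Note: ARRAY_SIZE replaces the open-coded sizeof division here and in set_array_info below; in kernels of this vintage it is defined in include/linux/kernel.h essentially as:

        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
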
@@ -2859,6 +2919,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2859static struct md_sysfs_entry md_suspend_hi = 2919static struct md_sysfs_entry md_suspend_hi =
2860__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2920__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2861 2921
2922static ssize_t
2923reshape_position_show(mddev_t *mddev, char *page)
2924{
2925 if (mddev->reshape_position != MaxSector)
2926 return sprintf(page, "%llu\n",
2927 (unsigned long long)mddev->reshape_position);
2928 strcpy(page, "none\n");
2929 return 5;
2930}
2931
2932static ssize_t
2933reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
2934{
2935 char *e;
2936 unsigned long long new = simple_strtoull(buf, &e, 10);
2937 if (mddev->pers)
2938 return -EBUSY;
2939 if (buf == e || (*e && *e != '\n'))
2940 return -EINVAL;
2941 mddev->reshape_position = new;
2942 mddev->delta_disks = 0;
2943 mddev->new_level = mddev->level;
2944 mddev->new_layout = mddev->layout;
2945 mddev->new_chunk = mddev->chunk_size;
2946 return len;
2947}
2948
2949static struct md_sysfs_entry md_reshape_position =
2950__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
2951 reshape_position_store);
2952
2862 2953
2863static struct attribute *md_default_attrs[] = { 2954static struct attribute *md_default_attrs[] = {
2864 &md_level.attr, 2955 &md_level.attr,
@@ -2871,6 +2962,7 @@ static struct attribute *md_default_attrs[] = {
2871 &md_new_device.attr, 2962 &md_new_device.attr,
2872 &md_safe_delay.attr, 2963 &md_safe_delay.attr,
2873 &md_array_state.attr, 2964 &md_array_state.attr,
2965 &md_reshape_position.attr,
2874 NULL, 2966 NULL,
2875}; 2967};
2876 2968
@@ -3012,6 +3104,7 @@ static int do_md_run(mddev_t * mddev)
3012 struct gendisk *disk; 3104 struct gendisk *disk;
3013 struct mdk_personality *pers; 3105 struct mdk_personality *pers;
3014 char b[BDEVNAME_SIZE]; 3106 char b[BDEVNAME_SIZE];
3107 struct block_device *bdev;
3015 3108
3016 if (list_empty(&mddev->disks)) 3109 if (list_empty(&mddev->disks))
3017 /* cannot run an array with no devices.. */ 3110 /* cannot run an array with no devices.. */
@@ -3239,7 +3332,13 @@ static int do_md_run(mddev_t * mddev)
3239 md_wakeup_thread(mddev->thread); 3332 md_wakeup_thread(mddev->thread);
3240 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 3333 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3241 3334
3242 mddev->changed = 1; 3335 bdev = bdget_disk(mddev->gendisk, 0);
3336 if (bdev) {
 3337 bd_set_size(bdev, mddev->array_size << 10);
3338 blkdev_ioctl(bdev->bd_inode, NULL, BLKRRPART, 0);
3339 bdput(bdev);
3340 }
3341
3243 md_new_event(mddev); 3342 md_new_event(mddev);
3244 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE); 3343 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3245 return 0; 3344 return 0;
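Note on the size arithmetic above: mddev->array_size is kept in KiB, set_capacity() takes 512-byte sectors (hence the << 1 used elsewhere in md), and bd_set_size() sets i_size in bytes (hence << 10). Side by side, for reference rather than from the patch:

        /* mddev->array_size is in KiB */
        set_capacity(disk, mddev->array_size << 1);     /* 512-byte sectors */
        bd_set_size(bdev, mddev->array_size << 10);     /* bytes (i_size) */

Issuing BLKRRPART here, together with the removal of md_media_changed/md_revalidate further down, moves partition detection from the media-change machinery to an explicit re-read at array start.
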
@@ -3361,7 +3460,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
3361 mddev->pers = NULL; 3460 mddev->pers = NULL;
3362 3461
3363 set_capacity(disk, 0); 3462 set_capacity(disk, 0);
3364 mddev->changed = 1;
3365 3463
3366 if (mddev->ro) 3464 if (mddev->ro)
3367 mddev->ro = 0; 3465 mddev->ro = 0;
@@ -3409,6 +3507,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
3409 mddev->size = 0; 3507 mddev->size = 0;
3410 mddev->raid_disks = 0; 3508 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0; 3509 mddev->recovery_cp = 0;
3510 mddev->reshape_position = MaxSector;
3412 3511
3413 } else if (mddev->pers) 3512 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3513 printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4019,7 +4118,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) { 4118 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */ 4119 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 || 4120 if (info->major_version < 0 ||
4022 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 4121 info->major_version >= ARRAY_SIZE(super_types) ||
4023 super_types[info->major_version].name == NULL) { 4122 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */ 4123 /* maybe try to auto-load a module? */
4025 printk(KERN_INFO 4124 printk(KERN_INFO
@@ -4500,20 +4599,6 @@ static int md_release(struct inode *inode, struct file * file)
4500 return 0; 4599 return 0;
4501} 4600}
4502 4601
4503static int md_media_changed(struct gendisk *disk)
4504{
4505 mddev_t *mddev = disk->private_data;
4506
4507 return mddev->changed;
4508}
4509
4510static int md_revalidate(struct gendisk *disk)
4511{
4512 mddev_t *mddev = disk->private_data;
4513
4514 mddev->changed = 0;
4515 return 0;
4516}
4517static struct block_device_operations md_fops = 4602static struct block_device_operations md_fops =
4518{ 4603{
4519 .owner = THIS_MODULE, 4604 .owner = THIS_MODULE,
@@ -4521,8 +4606,6 @@ static struct block_device_operations md_fops =
4521 .release = md_release, 4606 .release = md_release,
4522 .ioctl = md_ioctl, 4607 .ioctl = md_ioctl,
4523 .getgeo = md_getgeo, 4608 .getgeo = md_getgeo,
4524 .media_changed = md_media_changed,
4525 .revalidate_disk= md_revalidate,
4526}; 4609};
4527 4610
4528static int md_thread(void * arg) 4611static int md_thread(void * arg)
@@ -4941,15 +5024,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
4941 return error; 5024 return error;
4942} 5025}
4943 5026
4944static int md_seq_release(struct inode *inode, struct file *file)
4945{
4946 struct seq_file *m = file->private_data;
4947 struct mdstat_info *mi = m->private;
4948 m->private = NULL;
4949 kfree(mi);
4950 return seq_release(inode, file);
4951}
4952
4953static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5027static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4954{ 5028{
4955 struct seq_file *m = filp->private_data; 5029 struct seq_file *m = filp->private_data;
@@ -4971,7 +5045,7 @@ static const struct file_operations md_seq_fops = {
4971 .open = md_seq_open, 5045 .open = md_seq_open,
4972 .read = seq_read, 5046 .read = seq_read,
4973 .llseek = seq_lseek, 5047 .llseek = seq_lseek,
4974 .release = md_seq_release, 5048 .release = seq_release_private,
4975 .poll = mdstat_poll, 5049 .poll = mdstat_poll,
4976}; 5050};
4977 5051
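Note: md_seq_release duplicated what seq_release_private already provides. Paraphrased from fs/seq_file.c of this era, that helper does roughly:

        int seq_release_private(struct inode *inode, struct file *file)
        {
                struct seq_file *seq = file->private_data;

                kfree(seq->private);    /* here: the mdstat_info from md_seq_open() */
                seq->private = NULL;
                return seq_release(inode, file);
        }
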
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265d..1b7130cad21f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2063,7 +2063,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
2063 */ 2063 */
2064 mddev->array_size = sectors>>1; 2064 mddev->array_size = sectors>>1;
2065 set_capacity(mddev->gendisk, mddev->array_size << 1); 2065 set_capacity(mddev->gendisk, mddev->array_size << 1);
2066 mddev->changed = 1;
2067 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { 2066 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
2068 mddev->recovery_cp = mddev->size << 1; 2067 mddev->recovery_cp = mddev->size << 1;
2069 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2068 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d59914f2057..a72e70ad0975 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,8 +353,8 @@ static int grow_stripes(raid5_conf_t *conf, int num)
353 struct kmem_cache *sc; 353 struct kmem_cache *sc;
354 int devs = conf->raid_disks; 354 int devs = conf->raid_disks;
355 355
356 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); 356 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
357 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev)); 357 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
358 conf->active_name = 0; 358 conf->active_name = 0;
359 sc = kmem_cache_create(conf->cache_name[conf->active_name], 359 sc = kmem_cache_create(conf->cache_name[conf->active_name],
360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3864,7 +3864,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
3866 set_capacity(mddev->gendisk, mddev->array_size << 1); 3866 set_capacity(mddev->gendisk, mddev->array_size << 1);
3867 mddev->changed = 1;
3868 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 3867 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
3869 mddev->recovery_cp = mddev->size << 1; 3868 mddev->recovery_cp = mddev->size << 1;
3870 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3869 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -3999,7 +3998,6 @@ static void end_reshape(raid5_conf_t *conf)
3999 conf->mddev->array_size = conf->mddev->size * 3998 conf->mddev->array_size = conf->mddev->size *
4000 (conf->raid_disks - conf->max_degraded); 3999 (conf->raid_disks - conf->max_degraded);
4001 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4000 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
4002 conf->mddev->changed = 1;
4003 4001
4004 bdev = bdget_disk(conf->mddev->gendisk, 0); 4002 bdev = bdget_disk(conf->mddev->gendisk, 0);
4005 if (bdev) { 4003 if (bdev) {
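Note: the rename from "raid5/%s" to "raid5-%s" matters because slab cache names surface as procfs/sysfs entry names (SLUB, new around this release, creates /sys/slab/<name>), and '/' is not legal there. A hypothetical instance for array md0:

        sc = kmem_cache_create("raid5-md0",
                               sizeof(struct stripe_head) + (devs - 1) * sizeof(struct r5dev),
                               0, 0, NULL, NULL);
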
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b6c16704aaab..7385acfa1dd9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -501,9 +501,9 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
501{ 501{
502#ifdef CONFIG_MMC_DEBUG 502#ifdef CONFIG_MMC_DEBUG
503 unsigned long flags; 503 unsigned long flags;
504 spin_lock_irqsave(host->lock, flags); 504 spin_lock_irqsave(&host->lock, flags);
505 BUG_ON(host->removed); 505 BUG_ON(host->removed);
506 spin_unlock_irqrestore(host->lock, flags); 506 spin_unlock_irqrestore(&host->lock, flags);
507#endif 507#endif
508 508
509 mmc_schedule_delayed_work(&host->detect, delay); 509 mmc_schedule_delayed_work(&host->detect, delay);
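Note: the old call passed the spinlock_t itself where the locking macros require a pointer; struct mmc_host embeds the lock, so it must be taken by address. The broken form was compiled only under CONFIG_MMC_DEBUG, which is why it went unnoticed. Minimal illustration:

        spinlock_t lock;                        /* embedded, as in struct mmc_host */
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);        /* the macro wants a spinlock_t * */
        spin_unlock_irqrestore(&lock, flags);
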
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c0609..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
1214 int i; 1214 int i;
1215#endif 1215#endif
1216 1216
1217 flush_scheduled_work(); 1217 cancel_work_sync(&adapter->reset_task);
1218 1218
1219 e1000_release_manageability(adapter); 1219 e1000_release_manageability(adapter);
1220 1220
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056a..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
662 phy_error(phydev); 662 phy_error(phydev);
663 663
664 /* 664 /*
665 * Finish any pending work; we might have been scheduled 665 * Finish any pending work; we might have been scheduled to be called
666 * to be called from keventd ourselves, though. 666 * from keventd ourselves, but cancel_work_sync() handles that.
667 */ 667 */
668 run_scheduled_work(&phydev->phy_queue); 668 cancel_work_sync(&phydev->phy_queue);
669 669
670 free_irq(phydev->irq, phydev); 670 free_irq(phydev->irq, phydev);
671 671
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd808..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
3716 unsigned int restart_timer; 3716 unsigned int restart_timer;
3717 3717
3718 tg3_full_lock(tp, 0); 3718 tg3_full_lock(tp, 0);
3719 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3720 3719
3721 if (!netif_running(tp->dev)) { 3720 if (!netif_running(tp->dev)) {
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723 tg3_full_unlock(tp); 3721 tg3_full_unlock(tp);
3724 return; 3722 return;
3725 } 3723 }
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
3750 mod_timer(&tp->timer, jiffies + 1); 3748 mod_timer(&tp->timer, jiffies + 1);
3751 3749
3752out: 3750out:
3753 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3754
3755 tg3_full_unlock(tp); 3751 tg3_full_unlock(tp);
3756} 3752}
3757 3753
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
7390{ 7386{
7391 struct tg3 *tp = netdev_priv(dev); 7387 struct tg3 *tp = netdev_priv(dev);
7392 7388
7393 /* Calling flush_scheduled_work() may deadlock because 7389 cancel_work_sync(&tp->reset_task);
7394 * linkwatch_event() may be on the workqueue and it will try to get
7395 * the rtnl_lock which we are holding.
7396 */
7397 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7398 msleep(1);
7399 7390
7400 netif_stop_queue(dev); 7391 netif_stop_queue(dev);
7401 7392
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a243..bd9f4f428e5b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2229#define TG3_FLAG_10_100_ONLY 0x01000000 2229#define TG3_FLAG_10_100_ONLY 0x01000000
2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2231#define TG3_FLAG_IN_RESET_TASK 0x04000000 2231
2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2234#define TG3_FLAG_SUPPORT_MSI 0x20000000 2234#define TG3_FLAG_SUPPORT_MSI 0x20000000
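Note: the e1000, phy and tg3 hunks above all convert to cancel_work_sync(), introduced around this release. It removes a pending work item and, if the item is already running, waits for it to finish, without flushing unrelated work the way flush_scheduled_work() does; that is what lets tg3 drop both the TG3_FLAG_IN_RESET_TASK/msleep() polling loop and the rtnl_lock deadlock worry in the deleted comment. The resulting teardown idiom:

        cancel_work_sync(&tp->reset_task);      /* neither pending nor running afterwards */
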
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 66e7bc985797..1d8a2f6bb8eb 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -22,10 +22,7 @@
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/arch/board.h> 23#include <asm/arch/board.h>
24#include <asm/arch/gpio.h> 24#include <asm/arch/gpio.h>
25
26#ifdef CONFIG_ARCH_AT91
27#include <asm/arch/cpu.h> 25#include <asm/arch/cpu.h>
28#endif
29 26
30#include "atmel_spi.h" 27#include "atmel_spi.h"
31 28
@@ -552,10 +549,8 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
552 goto out_free_buffer; 549 goto out_free_buffer;
553 as->irq = irq; 550 as->irq = irq;
554 as->clk = clk; 551 as->clk = clk;
555#ifdef CONFIG_ARCH_AT91
556 if (!cpu_is_at91rm9200()) 552 if (!cpu_is_at91rm9200())
557 as->new_1 = 1; 553 as->new_1 = 1;
558#endif
559 554
560 ret = request_irq(irq, atmel_spi_interrupt, 0, 555 ret = request_irq(irq, atmel_spi_interrupt, 0,
561 pdev->dev.bus_id, master); 556 pdev->dev.bus_id, master);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index b082d95bbbaa..11e9b15ca45a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1033,7 +1033,7 @@ static int usbatm_do_heavy_init(void *arg)
1033 1033
1034static int usbatm_heavy_init(struct usbatm_data *instance) 1034static int usbatm_heavy_init(struct usbatm_data *instance)
1035{ 1035{
1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_KERNEL); 1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_FS | CLONE_FILES);
1037 1037
1038 if (ret < 0) { 1038 if (ret < 0) {
1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret); 1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret);
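Note: CLONE_KERNEL was defined in include/linux/sched.h of this era as:

        #define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

so spelling out CLONE_FS | CLONE_FILES effectively drops CLONE_SIGHAND, presumably so the heavy-init thread no longer shares signal handlers with its parent and can manage its own signal state.
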
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1132ba5ff391..9a256d2ff9dc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1348,6 +1348,20 @@ config FB_VOODOO1
1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported 1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported
1349 options and other important info support. 1349 options and other important info support.
1350 1350
1351config FB_VT8623
1352 tristate "VIA VT8623 support"
1353 depends on FB && PCI
1354 select FB_CFB_FILLRECT
1355 select FB_CFB_COPYAREA
1356 select FB_CFB_IMAGEBLIT
1357 select FB_TILEBLITTING
1358 select FB_SVGALIB
1359 select VGASTATE
1360 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1361 ---help---
1362 Driver for CastleRock integrated graphics core in the
1363 VIA VT8623 [Apollo CLE266] chipset.
1364
1351config FB_CYBLA 1365config FB_CYBLA
1352 tristate "Cyberblade/i1 support" 1366 tristate "Cyberblade/i1 support"
1353 depends on FB && PCI && X86_32 && !64BIT 1367 depends on FB && PCI && X86_32 && !64BIT
@@ -1401,6 +1415,20 @@ config FB_TRIDENT_ACCEL
1401 This will compile the Trident frame buffer device with 1415 This will compile the Trident frame buffer device with
1402 acceleration functions. 1416 acceleration functions.
1403 1417
1418config FB_ARK
1419 tristate "ARK 2000PV support"
1420 depends on FB && PCI
1421 select FB_CFB_FILLRECT
1422 select FB_CFB_COPYAREA
1423 select FB_CFB_IMAGEBLIT
1424 select FB_TILEBLITTING
1425 select FB_SVGALIB
1426 select VGASTATE
1427 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1428 ---help---
1429 Driver for PCI graphics boards with ARK 2000PV chip
1430 and ICS 5342 RAMDAC.
1431
1404config FB_PM3 1432config FB_PM3
1405 tristate "Permedia3 support" 1433 tristate "Permedia3 support"
1406 depends on FB && PCI && BROKEN 1434 depends on FB && PCI && BROKEN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a916c204274f..0b70567458fb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
54obj-$(CONFIG_FB_CT65550) += chipsfb.o 54obj-$(CONFIG_FB_CT65550) += chipsfb.o
55obj-$(CONFIG_FB_IMSTT) += imsttfb.o 55obj-$(CONFIG_FB_IMSTT) += imsttfb.o
56obj-$(CONFIG_FB_FM2) += fm2fb.o 56obj-$(CONFIG_FB_FM2) += fm2fb.o
57obj-$(CONFIG_FB_VT8623) += vt8623fb.o
57obj-$(CONFIG_FB_CYBLA) += cyblafb.o 58obj-$(CONFIG_FB_CYBLA) += cyblafb.o
58obj-$(CONFIG_FB_TRIDENT) += tridentfb.o 59obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
59obj-$(CONFIG_FB_LE80578) += vermilion/ 60obj-$(CONFIG_FB_LE80578) += vermilion/
60obj-$(CONFIG_FB_S3) += s3fb.o 61obj-$(CONFIG_FB_S3) += s3fb.o
62obj-$(CONFIG_FB_ARK) += arkfb.o
61obj-$(CONFIG_FB_STI) += stifb.o 63obj-$(CONFIG_FB_STI) += stifb.o
62obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o 64obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
63obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o 65obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
new file mode 100644
index 000000000000..ba6fede5c466
--- /dev/null
+++ b/drivers/video/arkfb.c
@@ -0,0 +1,1200 @@
1/*
2 * linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
3 * with ICS 5342 dac (it is easy to add support for different dacs).
4 *
5 * Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb
12 */
13
14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/tty.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/fb.h>
24#include <linux/svga.h>
25#include <linux/init.h>
26#include <linux/pci.h>
27#include <linux/console.h> /* Why should an fb driver call console functions? Because of acquire_console_sem() */
28#include <video/vga.h>
29
30#ifdef CONFIG_MTRR
31#include <asm/mtrr.h>
32#endif
33
34struct arkfb_info {
35 int mclk_freq;
36 int mtrr_reg;
37
38 struct dac_info *dac;
39 struct vgastate state;
40 struct mutex open_lock;
41 unsigned int ref_count;
42 u32 pseudo_palette[16];
43};
44
45
46/* ------------------------------------------------------------------------- */
47
48
49static const struct svga_fb_format arkfb_formats[] = {
50 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
51 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
52 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
53 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
54 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
55 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
56 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
57 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
58 {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
59 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
60 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
61 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
62 {24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
63 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
64 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
65 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
66 SVGA_FORMAT_END
67};
68
69
70/* CRT timing register sets */
71
72static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
73static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
74static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
75static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
76static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
77static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
78
79static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
80static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
81static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
82// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
83static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
84static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
85static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
86
87static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
88static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
89static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
90
91static const struct svga_timing_regs ark_timing_regs = {
92 ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
93 ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
94 ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
95 ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
96};
97
98
99/* ------------------------------------------------------------------------- */
100
101
102/* Module parameters */
103
104static char *mode = "640x480-8@60";
105
106#ifdef CONFIG_MTRR
107static int mtrr = 1;
108#endif
109
110MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
111MODULE_LICENSE("GPL");
112MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
113
114module_param(mode, charp, 0444);
115MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
116
117#ifdef CONFIG_MTRR
118module_param(mtrr, int, 0444);
119MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
120#endif
121
122static int threshold = 4;
123
124module_param(threshold, int, 0644);
125MODULE_PARM_DESC(threshold, "FIFO threshold");
126
127
128/* ------------------------------------------------------------------------- */
129
130
131static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
132{
133 const u8 *font = map->data;
134 u8 __iomem *fb = (u8 __iomem *)info->screen_base;
135 int i, c;
136
137 if ((map->width != 8) || (map->height != 16) ||
138 (map->depth != 1) || (map->length != 256)) {
139 printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
140 "height %d, depth %d, length %d\n", info->node,
141 map->width, map->height, map->depth, map->length);
142 return;
143 }
144
145 fb += 2;
146 for (c = 0; c < map->length; c++) {
147 for (i = 0; i < map->height; i++) {
148 fb_writeb(font[i], &fb[i * 4]);
149 fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
150 }
151 fb += 128;
152
153 if ((c % 8) == 7)
154 fb += 128*8;
155
156 font += map->height;
157 }
158}
159
160static struct fb_tile_ops arkfb_tile_ops = {
161 .fb_settile = arkfb_settile,
162 .fb_tilecopy = svga_tilecopy,
163 .fb_tilefill = svga_tilefill,
164 .fb_tileblit = svga_tileblit,
165 .fb_tilecursor = svga_tilecursor,
166 .fb_get_tilemax = svga_get_tilemax,
167};
168
169
170/* ------------------------------------------------------------------------- */
171
172
173/* image data is MSB-first, fb structure is MSB-first too */
174static inline u32 expand_color(u32 c)
175{
176 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
177}
178
179/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
180static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
181{
182 u32 fg = expand_color(image->fg_color);
183 u32 bg = expand_color(image->bg_color);
184 const u8 *src1, *src;
185 u8 __iomem *dst1;
186 u32 __iomem *dst;
187 u32 val;
188 int x, y;
189
190 src1 = image->data;
191 dst1 = info->screen_base + (image->dy * info->fix.line_length)
192 + ((image->dx / 8) * 4);
193
194 for (y = 0; y < image->height; y++) {
195 src = src1;
196 dst = (u32 __iomem *) dst1;
197 for (x = 0; x < image->width; x += 8) {
198 val = *(src++) * 0x01010101;
199 val = (val & fg) | (~val & bg);
200 fb_writel(val, dst++);
201 }
202 src1 += image->width / 8;
203 dst1 += info->fix.line_length;
204 }
205
206}
207
208/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
209static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
210{
211 u32 fg = expand_color(rect->color);
212 u8 __iomem *dst1;
213 u32 __iomem *dst;
214 int x, y;
215
216 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
217 + ((rect->dx / 8) * 4);
218
219 for (y = 0; y < rect->height; y++) {
220 dst = (u32 __iomem *) dst1;
221 for (x = 0; x < rect->width; x += 8) {
222 fb_writel(fg, dst++);
223 }
224 dst1 += info->fix.line_length;
225 }
226
227}
228
229
230/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
231static inline u32 expand_pixel(u32 c)
232{
233 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
234 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
235}
236
237/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
238static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
239{
240 u32 fg = image->fg_color * 0x11111111;
241 u32 bg = image->bg_color * 0x11111111;
242 const u8 *src1, *src;
243 u8 __iomem *dst1;
244 u32 __iomem *dst;
245 u32 val;
246 int x, y;
247
248 src1 = image->data;
249 dst1 = info->screen_base + (image->dy * info->fix.line_length)
250 + ((image->dx / 8) * 4);
251
252 for (y = 0; y < image->height; y++) {
253 src = src1;
254 dst = (u32 __iomem *) dst1;
255 for (x = 0; x < image->width; x += 8) {
256 val = expand_pixel(*(src++));
257 val = (val & fg) | (~val & bg);
258 fb_writel(val, dst++);
259 }
260 src1 += image->width / 8;
261 dst1 += info->fix.line_length;
262 }
263
264}
265
266static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
267{
268 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
269 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
270 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
271 arkfb_iplan_imageblit(info, image);
272 else
273 arkfb_cfb4_imageblit(info, image);
274 } else
275 cfb_imageblit(info, image);
276}
277
278static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
279{
280 if ((info->var.bits_per_pixel == 4)
281 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
282 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
283 arkfb_iplan_fillrect(info, rect);
284 else
285 cfb_fillrect(info, rect);
286}
287
288
289/* ------------------------------------------------------------------------- */
290
291
292enum
293{
294 DAC_PSEUDO8_8,
295 DAC_RGB1555_8,
296 DAC_RGB0565_8,
297 DAC_RGB0888_8,
298 DAC_RGB8888_8,
299 DAC_PSEUDO8_16,
300 DAC_RGB1555_16,
301 DAC_RGB0565_16,
302 DAC_RGB0888_16,
303 DAC_RGB8888_16,
304 DAC_MAX
305};
306
307struct dac_ops {
308 int (*dac_get_mode)(struct dac_info *info);
309 int (*dac_set_mode)(struct dac_info *info, int mode);
310 int (*dac_get_freq)(struct dac_info *info, int channel);
311 int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
312 void (*dac_release)(struct dac_info *info);
313};
314
315typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
316typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
317
318struct dac_info
319{
320 struct dac_ops *dacops;
321 dac_read_regs_t dac_read_regs;
322 dac_write_regs_t dac_write_regs;
323 void *data;
324};
325
326
327static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
328{
329 u8 code[2] = {reg, 0};
330 info->dac_read_regs(info->data, code, 1);
331 return code[1];
332}
333
334static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
335{
336 info->dac_read_regs(info->data, code, count);
337}
338
339static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
340{
341 u8 code[2] = {reg, val};
342 info->dac_write_regs(info->data, code, 1);
343}
344
345static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
346{
347 info->dac_write_regs(info->data, code, count);
348}
349
350static inline int dac_set_mode(struct dac_info *info, int mode)
351{
352 return info->dacops->dac_set_mode(info, mode);
353}
354
355static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
356{
357 return info->dacops->dac_set_freq(info, channel, freq);
358}
359
360static inline void dac_release(struct dac_info *info)
361{
362 info->dacops->dac_release(info);
363}
364
365
366/* ------------------------------------------------------------------------- */
367
368
369/* ICS5342 DAC */
370
371struct ics5342_info
372{
373 struct dac_info dac;
374 u8 mode;
375};
376
377#define DAC_PAR(info) ((struct ics5342_info *) info)
378
379/* LSB is set to distinguish unused slots */
380static const u8 ics5342_mode_table[DAC_MAX] = {
381 [DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
382 [DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
383 [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
384};
385
386static int ics5342_set_mode(struct dac_info *info, int mode)
387{
388 u8 code;
389
390 if (mode >= DAC_MAX)
391 return -EINVAL;
392
393 code = ics5342_mode_table[mode];
394
395 if (! code)
396 return -EINVAL;
397
398 dac_write_reg(info, 6, code & 0xF0);
399 DAC_PAR(info)->mode = mode;
400
401 return 0;
402}
403
404static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
405 60000, 250000, 14318};
406
 407 /* pd4 - allow only postdivider 4 (r=2) */
408static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
409 60000, 335000, 14318};
410
 411 /* 270 MHz should be the upper bound for the VCO clock according to the specs,
 412 but that is too restrictive in the pd4 case */
413
414static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
415{
416 u16 m, n, r;
417
418 /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
419 int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
420 ? &ics5342_pll_pd4 : &ics5342_pll,
421 freq, &m, &n, &r, 0);
422
423 if (rv < 0) {
424 return -EINVAL;
425 } else {
426 u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
427 dac_write_regs(info, code, 3);
428 return 0;
429 }
430}
431
432static void ics5342_release(struct dac_info *info)
433{
434 ics5342_set_mode(info, DAC_PSEUDO8_8);
435 kfree(info);
436}
437
438static struct dac_ops ics5342_ops = {
439 .dac_set_mode = ics5342_set_mode,
440 .dac_set_freq = ics5342_set_freq,
441 .dac_release = ics5342_release
442};
443
444
445static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
446{
447 struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
448
449 if (! info)
450 return NULL;
451
452 info->dacops = &ics5342_ops;
453 info->dac_read_regs = drr;
454 info->dac_write_regs = dwr;
455 info->data = data;
 456 DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* initial estimate */
457 return info;
458}
459
460
461/* ------------------------------------------------------------------------- */
462
463
464static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
465
466static void ark_dac_read_regs(void *data, u8 *code, int count)
467{
468 u8 regval = vga_rseq(NULL, 0x1C);
469
470 while (count != 0)
471 {
 472 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
473 code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
474 count--;
475 code += 2;
476 }
477
478 vga_wseq(NULL, 0x1C, regval);
479}
480
481static void ark_dac_write_regs(void *data, u8 *code, int count)
482{
483 u8 regval = vga_rseq(NULL, 0x1C);
484
485 while (count != 0)
486 {
 487 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
488 vga_w(NULL, dac_regs[code[0] & 3], code[1]);
489 count--;
490 code += 2;
491 }
492
493 vga_wseq(NULL, 0x1C, regval);
494}
495
496
497static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
498{
499 struct arkfb_info *par = info->par;
500 u8 regval;
501
502 int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
503 if (rv < 0) {
504 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
505 return;
506 }
507
508 /* Set VGA misc register */
509 regval = vga_r(NULL, VGA_MIS_R);
510 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
511}
512
513
514/* Open framebuffer */
515
516static int arkfb_open(struct fb_info *info, int user)
517{
518 struct arkfb_info *par = info->par;
519
520 mutex_lock(&(par->open_lock));
521 if (par->ref_count == 0) {
522 memset(&(par->state), 0, sizeof(struct vgastate));
523 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
524 par->state.num_crtc = 0x60;
525 par->state.num_seq = 0x30;
526 save_vga(&(par->state));
527 }
528
529 par->ref_count++;
530 mutex_unlock(&(par->open_lock));
531
532 return 0;
533}
534
535/* Close framebuffer */
536
537static int arkfb_release(struct fb_info *info, int user)
538{
539 struct arkfb_info *par = info->par;
540
541 mutex_lock(&(par->open_lock));
542 if (par->ref_count == 0) {
543 mutex_unlock(&(par->open_lock));
544 return -EINVAL;
545 }
546
547 if (par->ref_count == 1) {
548 restore_vga(&(par->state));
549 dac_set_mode(par->dac, DAC_PSEUDO8_8);
550 }
551
552 par->ref_count--;
553 mutex_unlock(&(par->open_lock));
554
555 return 0;
556}
557
558/* Validate passed in var */
559
560static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
561{
562 int rv, mem, step;
563
564 /* Find appropriate format */
565 rv = svga_match_format (arkfb_formats, var, NULL);
566 if (rv < 0)
567 {
568 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
569 return rv;
570 }
571
 572 /* Do not allow the real resolution to be larger than the virtual one */
573 if (var->xres > var->xres_virtual)
574 var->xres_virtual = var->xres;
575
576 if (var->yres > var->yres_virtual)
577 var->yres_virtual = var->yres;
578
579 /* Round up xres_virtual to have proper alignment of lines */
580 step = arkfb_formats[rv].xresstep - 1;
581 var->xres_virtual = (var->xres_virtual+step) & ~step;
582
583
 584 /* Check whether we have enough memory */
585 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
586 if (mem > info->screen_size)
587 {
 588 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
589 return -EINVAL;
590 }
591
592 rv = svga_check_timings (&ark_timing_regs, var, info->node);
593 if (rv < 0)
594 {
595 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
596 return rv;
597 }
598
599 /* Interlaced mode is broken */
600 if (var->vmode & FB_VMODE_INTERLACED)
601 return -EINVAL;
602
603 return 0;
604}
605
606/* Set video mode from par */
607
608static int arkfb_set_par(struct fb_info *info)
609{
610 struct arkfb_info *par = info->par;
611 u32 value, mode, hmul, hdiv, offset_value, screen_size;
612 u32 bpp = info->var.bits_per_pixel;
613 u8 regval;
614
615 if (bpp != 0) {
616 info->fix.ypanstep = 1;
617 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
618
619 info->flags &= ~FBINFO_MISC_TILEBLITTING;
620 info->tileops = NULL;
621
 622 /* in 4 bpp only 8-pixel-wide tiles are supported, any tile size otherwise */
623 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
624 info->pixmap.blit_y = ~(u32)0;
625
626 offset_value = (info->var.xres_virtual * bpp) / 64;
627 screen_size = info->var.yres_virtual * info->fix.line_length;
628 } else {
629 info->fix.ypanstep = 16;
630 info->fix.line_length = 0;
631
632 info->flags |= FBINFO_MISC_TILEBLITTING;
633 info->tileops = &arkfb_tile_ops;
634
635 /* supports 8x16 tiles only */
636 info->pixmap.blit_x = 1 << (8 - 1);
637 info->pixmap.blit_y = 1 << (16 - 1);
638
639 offset_value = info->var.xres_virtual / 16;
640 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
641 }
642
643 info->var.xoffset = 0;
644 info->var.yoffset = 0;
645 info->var.activate = FB_ACTIVATE_NOW;
646
647 /* Unlock registers */
648 svga_wcrt_mask(0x11, 0x00, 0x80);
649
650 /* Blank screen and turn off sync */
651 svga_wseq_mask(0x01, 0x20, 0x20);
652 svga_wcrt_mask(0x17, 0x00, 0x80);
653
654 /* Set default values */
655 svga_set_default_gfx_regs();
656 svga_set_default_atc_regs();
657 svga_set_default_seq_regs();
658 svga_set_default_crt_regs();
659 svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
660 svga_wcrt_multi(ark_start_address_regs, 0);
661
662 /* ARK specific initialization */
663 svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
664 svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
665
666 vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
667 vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
668 vga_wseq(NULL, 0x15, 0);
669 vga_wseq(NULL, 0x16, 0);
670
671 /* Set the FIFO threshold register */
 672 /* It is a fascinating way to store a 5-bit value in an 8-bit register */
673 regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
674 vga_wseq(NULL, 0x18, regval);
675
676 /* Set the offset register */
677 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
678 svga_wcrt_multi(ark_offset_regs, offset_value);
679
680 /* fix for hi-res textmode */
681 svga_wcrt_mask(0x40, 0x08, 0x08);
682
683 if (info->var.vmode & FB_VMODE_DOUBLE)
684 svga_wcrt_mask(0x09, 0x80, 0x80);
685 else
686 svga_wcrt_mask(0x09, 0x00, 0x80);
687
688 if (info->var.vmode & FB_VMODE_INTERLACED)
689 svga_wcrt_mask(0x44, 0x04, 0x04);
690 else
691 svga_wcrt_mask(0x44, 0x00, 0x04);
692
693 hmul = 1;
694 hdiv = 1;
695 mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
696
697 /* Set mode-specific register values */
698 switch (mode) {
699 case 0:
700 pr_debug("fb%d: text mode\n", info->node);
701 svga_set_textmode_vga_regs();
702
703 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
704 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
705 dac_set_mode(par->dac, DAC_PSEUDO8_8);
706
707 break;
708 case 1:
709 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
710 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
711
712 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
713 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
714 dac_set_mode(par->dac, DAC_PSEUDO8_8);
715 break;
716 case 2:
717 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
718
719 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
720 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
721 dac_set_mode(par->dac, DAC_PSEUDO8_8);
722 break;
723 case 3:
724 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
725
726 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
727
728 if (info->var.pixclock > 20000) {
729 pr_debug("fb%d: not using multiplex\n", info->node);
730 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
731 dac_set_mode(par->dac, DAC_PSEUDO8_8);
732 } else {
733 pr_debug("fb%d: using multiplex\n", info->node);
734 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
735 dac_set_mode(par->dac, DAC_PSEUDO8_16);
736 hdiv = 2;
737 }
738 break;
739 case 4:
740 pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
741
742 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
743 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
744 dac_set_mode(par->dac, DAC_RGB1555_16);
745 break;
746 case 5:
747 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
748
749 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
750 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
751 dac_set_mode(par->dac, DAC_RGB0565_16);
752 break;
753 case 6:
754 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
755
756 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
757 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
758 dac_set_mode(par->dac, DAC_RGB0888_16);
759 hmul = 3;
760 hdiv = 2;
761 break;
762 case 7:
763 pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
764
765 vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
766 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
767 dac_set_mode(par->dac, DAC_RGB8888_16);
768 hmul = 2;
769 break;
770 default:
771 printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
772 return -EINVAL;
773 }
774
775 ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
776 svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
777 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
778 (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
779 hmul, info->node);
780
781 /* Set interlaced mode start/end register */
782 value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
783 value = ((value * hmul / hdiv) / 8) - 5;
784 vga_wcrt(NULL, 0x42, (value + 1) / 2);
785
786 memset_io(info->screen_base, 0x00, screen_size);
787 /* Device and screen back on */
788 svga_wcrt_mask(0x17, 0x80, 0x80);
789 svga_wseq_mask(0x01, 0x00, 0x20);
790
791 return 0;
792}
793
794/* Set a colour register */
795
796static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
797 u_int transp, struct fb_info *fb)
798{
799 switch (fb->var.bits_per_pixel) {
800 case 0:
801 case 4:
802 if (regno >= 16)
803 return -EINVAL;
804
805 if ((fb->var.bits_per_pixel == 4) &&
806 (fb->var.nonstd == 0)) {
807 outb(0xF0, VGA_PEL_MSK);
808 outb(regno*16, VGA_PEL_IW);
809 } else {
810 outb(0x0F, VGA_PEL_MSK);
811 outb(regno, VGA_PEL_IW);
812 }
813 outb(red >> 10, VGA_PEL_D);
814 outb(green >> 10, VGA_PEL_D);
815 outb(blue >> 10, VGA_PEL_D);
816 break;
817 case 8:
818 if (regno >= 256)
819 return -EINVAL;
820
821 outb(0xFF, VGA_PEL_MSK);
822 outb(regno, VGA_PEL_IW);
823 outb(red >> 10, VGA_PEL_D);
824 outb(green >> 10, VGA_PEL_D);
825 outb(blue >> 10, VGA_PEL_D);
826 break;
827 case 16:
828 if (regno >= 16)
829 return 0;
830
831 if (fb->var.green.length == 5)
832 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
833 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
834 else if (fb->var.green.length == 6)
835 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
836 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
837 else
838 return -EINVAL;
839 break;
840 case 24:
841 case 32:
842 if (regno >= 16)
843 return 0;
844
845 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
846 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
847 break;
848 default:
849 return -EINVAL;
850 }
851
852 return 0;
853}
854
855/* Set the display blanking state */
856
857static int arkfb_blank(int blank_mode, struct fb_info *info)
858{
859 switch (blank_mode) {
860 case FB_BLANK_UNBLANK:
861 pr_debug("fb%d: unblank\n", info->node);
862 svga_wseq_mask(0x01, 0x00, 0x20);
863 svga_wcrt_mask(0x17, 0x80, 0x80);
864 break;
865 case FB_BLANK_NORMAL:
866 pr_debug("fb%d: blank\n", info->node);
867 svga_wseq_mask(0x01, 0x20, 0x20);
868 svga_wcrt_mask(0x17, 0x80, 0x80);
869 break;
870 case FB_BLANK_POWERDOWN:
871 case FB_BLANK_HSYNC_SUSPEND:
872 case FB_BLANK_VSYNC_SUSPEND:
873 pr_debug("fb%d: sync down\n", info->node);
874 svga_wseq_mask(0x01, 0x20, 0x20);
875 svga_wcrt_mask(0x17, 0x00, 0x80);
876 break;
877 }
878 return 0;
879}
880
881
882/* Pan the display */
883
884static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
885{
886 unsigned int offset;
887
888 /* Calculate the offset */
889 if (var->bits_per_pixel == 0) {
890 offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
891 offset = offset >> 2;
892 } else {
893 offset = (var->yoffset * info->fix.line_length) +
894 (var->xoffset * var->bits_per_pixel / 8);
895 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
896 }
897
898 /* Set the offset */
899 svga_wcrt_multi(ark_start_address_regs, offset);
900
901 return 0;
902}
903
904
905/* ------------------------------------------------------------------------- */
906
907
908/* Frame buffer operations */
909
910static struct fb_ops arkfb_ops = {
911 .owner = THIS_MODULE,
912 .fb_open = arkfb_open,
913 .fb_release = arkfb_release,
914 .fb_check_var = arkfb_check_var,
915 .fb_set_par = arkfb_set_par,
916 .fb_setcolreg = arkfb_setcolreg,
917 .fb_blank = arkfb_blank,
918 .fb_pan_display = arkfb_pan_display,
919 .fb_fillrect = arkfb_fillrect,
920 .fb_copyarea = cfb_copyarea,
921 .fb_imageblit = arkfb_imageblit,
922 .fb_get_caps = svga_get_caps,
923};
924
925
926/* ------------------------------------------------------------------------- */
927
928
929/* PCI probe */
930static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
931{
932 struct fb_info *info;
933 struct arkfb_info *par;
934 int rc;
935 u8 regval;
936
937 /* Ignore secondary VGA device because there is no VGA arbitration */
938 if (! svga_primary_device(dev)) {
939 dev_info(&(dev->dev), "ignoring secondary device\n");
940 return -ENODEV;
941 }
942
943 /* Allocate and fill driver data structure */
944 info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
945 if (! info) {
946 dev_err(&(dev->dev), "cannot allocate memory\n");
947 return -ENOMEM;
948 }
949
950 par = info->par;
951 mutex_init(&par->open_lock);
952
953 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
954 info->fbops = &arkfb_ops;
955
956 /* Prepare PCI device */
957 rc = pci_enable_device(dev);
958 if (rc < 0) {
959 dev_err(&(dev->dev), "cannot enable PCI device\n");
960 goto err_enable_device;
961 }
962
963 rc = pci_request_regions(dev, "arkfb");
964 if (rc < 0) {
965 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
966 goto err_request_regions;
967 }
968
969 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
970 if (! par->dac) {
971 rc = -ENOMEM;
972 dev_err(&(dev->dev), "RAMDAC initialization failed\n");
973 goto err_dac;
974 }
975
976 info->fix.smem_start = pci_resource_start(dev, 0);
977 info->fix.smem_len = pci_resource_len(dev, 0);
978
979 /* Map physical IO memory address into kernel space */
980 info->screen_base = pci_iomap(dev, 0, 0);
981 if (! info->screen_base) {
982 rc = -ENOMEM;
983 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
984 goto err_iomap;
985 }
986
987 /* FIXME get memsize */
988 regval = vga_rseq(NULL, 0x10);
989 info->screen_size = (1 << (regval >> 6)) << 20;
990 info->fix.smem_len = info->screen_size;
991
992 strcpy(info->fix.id, "ARK 2000PV");
993 info->fix.mmio_start = 0;
994 info->fix.mmio_len = 0;
995 info->fix.type = FB_TYPE_PACKED_PIXELS;
996 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
997 info->fix.ypanstep = 0;
998 info->fix.accel = FB_ACCEL_NONE;
999 info->pseudo_palette = (void*) (par->pseudo_palette);
1000
1001 /* Prepare startup mode */
1002 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
1003 if (! ((rc == 1) || (rc == 2))) {
1004 rc = -EINVAL;
1005 dev_err(&(dev->dev), "mode %s not found\n", mode);
1006 goto err_find_mode;
1007 }
1008
1009 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1010 if (rc < 0) {
1011 dev_err(&(dev->dev), "cannot allocate colormap\n");
1012 goto err_alloc_cmap;
1013 }
1014
1015 rc = register_framebuffer(info);
1016 if (rc < 0) {
 1017 dev_err(&(dev->dev), "cannot register framebuffer\n");
1018 goto err_reg_fb;
1019 }
1020
1021 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
1022 pci_name(dev), info->fix.smem_len >> 20);
1023
1024 /* Record a reference to the driver data */
1025 pci_set_drvdata(dev, info);
1026
1027#ifdef CONFIG_MTRR
1028 if (mtrr) {
1029 par->mtrr_reg = -1;
1030 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
1031 }
1032#endif
1033
1034 return 0;
1035
1036 /* Error handling */
1037err_reg_fb:
1038 fb_dealloc_cmap(&info->cmap);
1039err_alloc_cmap:
1040err_find_mode:
1041 pci_iounmap(dev, info->screen_base);
1042err_iomap:
1043 dac_release(par->dac);
1044err_dac:
1045 pci_release_regions(dev);
1046err_request_regions:
1047/* pci_disable_device(dev); */
1048err_enable_device:
1049 framebuffer_release(info);
1050 return rc;
1051}
1052
1053/* PCI remove */
1054
1055static void __devexit ark_pci_remove(struct pci_dev *dev)
1056{
1057 struct fb_info *info = pci_get_drvdata(dev);
 1058
 1059 if (info) {
 1060 struct arkfb_info *par = info->par;
1061#ifdef CONFIG_MTRR
1062 if (par->mtrr_reg >= 0) {
1063 mtrr_del(par->mtrr_reg, 0, 0);
1064 par->mtrr_reg = -1;
1065 }
1066#endif
1067
1068 dac_release(par->dac);
1069 unregister_framebuffer(info);
1070 fb_dealloc_cmap(&info->cmap);
1071
1072 pci_iounmap(dev, info->screen_base);
1073 pci_release_regions(dev);
1074/* pci_disable_device(dev); */
1075
1076 pci_set_drvdata(dev, NULL);
1077 framebuffer_release(info);
1078 }
1079}
1080
1081
1082#ifdef CONFIG_PM
1083/* PCI suspend */
1084
1085static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1086{
1087 struct fb_info *info = pci_get_drvdata(dev);
1088 struct arkfb_info *par = info->par;
1089
1090 dev_info(&(dev->dev), "suspend\n");
1091
1092 acquire_console_sem();
1093 mutex_lock(&(par->open_lock));
1094
1095 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
1096 mutex_unlock(&(par->open_lock));
1097 release_console_sem();
1098 return 0;
1099 }
1100
1101 fb_set_suspend(info, 1);
1102
1103 pci_save_state(dev);
1104 pci_disable_device(dev);
1105 pci_set_power_state(dev, pci_choose_state(dev, state));
1106
1107 mutex_unlock(&(par->open_lock));
1108 release_console_sem();
1109
1110 return 0;
1111}
1112
1113
1114/* PCI resume */
1115
1116static int ark_pci_resume (struct pci_dev* dev)
1117{
1118 struct fb_info *info = pci_get_drvdata(dev);
1119 struct arkfb_info *par = info->par;
1120
1121 dev_info(&(dev->dev), "resume\n");
1122
1123 acquire_console_sem();
1124 mutex_lock(&(par->open_lock));
1125
1126 if (par->ref_count == 0) {
1127 mutex_unlock(&(par->open_lock));
1128 release_console_sem();
1129 return 0;
1130 }
1131
1132 pci_set_power_state(dev, PCI_D0);
1133 pci_restore_state(dev);
1134
1135 if (pci_enable_device(dev))
1136 goto fail;
1137
1138 pci_set_master(dev);
1139
1140 arkfb_set_par(info);
1141 fb_set_suspend(info, 0);
1142
1143 mutex_unlock(&(par->open_lock));
1144fail:
1145 release_console_sem();
1146 return 0;
1147}
1148#else
1149#define ark_pci_suspend NULL
1150#define ark_pci_resume NULL
1151#endif /* CONFIG_PM */
1152
1153/* List of boards that we are trying to support */
1154
1155static struct pci_device_id ark_devices[] __devinitdata = {
1156 {PCI_DEVICE(0xEDD8, 0xA099)},
1157 {0, 0, 0, 0, 0, 0, 0}
1158};
1159
1160
1161MODULE_DEVICE_TABLE(pci, ark_devices);
1162
1163static struct pci_driver arkfb_pci_driver = {
1164 .name = "arkfb",
1165 .id_table = ark_devices,
1166 .probe = ark_pci_probe,
1167 .remove = __devexit_p(ark_pci_remove),
1168 .suspend = ark_pci_suspend,
1169 .resume = ark_pci_resume,
1170};
1171
1172/* Cleanup */
1173
1174static void __exit arkfb_cleanup(void)
1175{
1176 pr_debug("arkfb: cleaning up\n");
1177 pci_unregister_driver(&arkfb_pci_driver);
1178}
1179
1180/* Driver Initialisation */
1181
1182static int __init arkfb_init(void)
1183{
1184
1185#ifndef MODULE
1186 char *option = NULL;
1187
1188 if (fb_get_options("arkfb", &option))
1189 return -ENODEV;
1190
1191 if (option && *option)
1192 mode = option;
1193#endif
1194
1195 pr_debug("arkfb: initializing\n");
1196 return pci_register_driver(&arkfb_pci_driver);
1197}
1198
1199module_init(arkfb_init);
1200module_exit(arkfb_cleanup);
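Note: a worked example for expand_color() in the new driver: each bit of a 4-bit color index is spread into one full byte of the 32-bit result, producing byte-wide plane masks for the interleaved-plane blits:

        /* expand_color(0x5):
         *   (0x5 & 1)       = 0x00000001
         *   (0x5 & 4) << 14 = 0x00010000
         *   sum             = 0x00010001
         *   * 0xFF          = 0x00FF00FF   -> planes 0 and 2 selected byte-wide
         */

arkfb_iplan_imageblit then combines such masks as (val & fg) | (~val & bg), picking foreground or background per pixel in a single 32-bit store.
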
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 08d4e11d9121..38c2e2558f5e 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__) 1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1239#elif defined(__avr32__)
1240 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
1241 & ~_PAGE_CACHABLE)
1242 | (_PAGE_BUFFER | _PAGE_DIRTY));
1239#elif defined(__ia64__) 1243#elif defined(__ia64__)
1240 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) 1244 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
1241 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1245 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index f297c7b14a41..c627955aa124 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -149,8 +149,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
149 pll = NV_RD32(par->PMC, 0x4024); 149 pll = NV_RD32(par->PMC, 0x4024);
150 M = pll & 0xFF; 150 M = pll & 0xFF;
151 N = (pll >> 8) & 0xFF; 151 N = (pll >> 8) & 0xFF;
152 if (((par->Chipset & 0xfff0) == 0x0290) || 152 if (((par->Chipset & 0xfff0) == 0x0290) || ((par->Chipset & 0xfff0) == 0x0390) || ((par->Chipset & 0xfff0) == 0x02E0)) {
153 ((par->Chipset & 0xfff0) == 0x0390)) {
154 MB = 1; 153 MB = 1;
155 NB = 1; 154 NB = 1;
156 } else { 155 } else {
@@ -963,6 +962,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
963 962
964 if (((par->Chipset & 0xfff0) == 0x0090) || 963 if (((par->Chipset & 0xfff0) == 0x0090) ||
965 ((par->Chipset & 0xfff0) == 0x01D0) || 964 ((par->Chipset & 0xfff0) == 0x01D0) ||
965 ((par->Chipset & 0xfff0) == 0x02E0) ||
966 ((par->Chipset & 0xfff0) == 0x0290)) 966 ((par->Chipset & 0xfff0) == 0x0290))
967 regions = 15; 967 regions = 15;
968 for(i = 0; i < regions; i++) { 968 for(i = 0; i < regions; i++) {
@@ -1275,6 +1275,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1275 0x00100000); 1275 0x00100000);
1276 break; 1276 break;
1277 case 0x0090: 1277 case 0x0090:
1278 case 0x02E0:
1278 case 0x0290: 1279 case 0x0290:
1279 NV_WR32(par->PRAMDAC, 0x0608, 1280 NV_WR32(par->PRAMDAC, 0x0608,
1280 NV_RD32(par->PRAMDAC, 0x0608) | 1281 NV_RD32(par->PRAMDAC, 0x0608) |
@@ -1352,6 +1353,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1352 } else { 1353 } else {
1353 if (((par->Chipset & 0xfff0) == 0x0090) || 1354 if (((par->Chipset & 0xfff0) == 0x0090) ||
1354 ((par->Chipset & 0xfff0) == 0x01D0) || 1355 ((par->Chipset & 0xfff0) == 0x01D0) ||
1356 ((par->Chipset & 0xfff0) == 0x02E0) ||
1355 ((par->Chipset & 0xfff0) == 0x0290)) { 1357 ((par->Chipset & 0xfff0) == 0x0290)) {
1356 for (i = 0; i < 60; i++) { 1358 for (i = 0; i < 60; i++) {
1357 NV_WR32(par->PGRAPH, 1359 NV_WR32(par->PGRAPH,
@@ -1403,6 +1405,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1403 } else { 1405 } else {
1404 if ((par->Chipset & 0xfff0) == 0x0090 || 1406 if ((par->Chipset & 0xfff0) == 0x0090 ||
1405 (par->Chipset & 0xfff0) == 0x01D0 || 1407 (par->Chipset & 0xfff0) == 0x01D0 ||
1408 (par->Chipset & 0xfff0) == 0x02E0 ||
1406 (par->Chipset & 0xfff0) == 0x0290) { 1409 (par->Chipset & 0xfff0) == 0x0290) {
1407 NV_WR32(par->PGRAPH, 0x0DF0, 1410 NV_WR32(par->PGRAPH, 0x0DF0,
1408 NV_RD32(par->PFB, 0x0200)); 1411 NV_RD32(par->PFB, 0x0200));
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 7c36b5fe582e..f85edf084da3 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1243,6 +1243,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
1243 case 0x0140: /* GeForce 6600 */ 1243 case 0x0140: /* GeForce 6600 */
1244 case 0x0160: /* GeForce 6200 */ 1244 case 0x0160: /* GeForce 6200 */
1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */ 1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */
1246 case 0x02E0: /* GeForce 7300 GT */
1246 case 0x0090: /* GeForce 7800 */ 1247 case 0x0090: /* GeForce 7800 */
1247 case 0x0210: /* GeForce 6800 */ 1248 case 0x0210: /* GeForce 6800 */
1248 case 0x0220: /* GeForce 6200 */ 1249 case 0x0220: /* GeForce 6200 */
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 756fafb41d78..d11735895a01 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
796 return 0; 796 return 0;
797} 797}
798 798
799/* Get capabilities of accelerator based on the mode */
800
801static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
802 struct fb_var_screeninfo *var)
803{
804 if (var->bits_per_pixel == 0) {
805 /* can only support 256 8x16 bitmap */
806 caps->x = 1 << (8 - 1);
807 caps->y = 1 << (16 - 1);
808 caps->len = 256;
809 } else {
810 caps->x = ~(u32)0;
811 caps->y = ~(u32)0;
812 caps->len = ~(u32)0;
813 }
814}
815
816/* ------------------------------------------------------------------------- */ 799/* ------------------------------------------------------------------------- */
817 800
818/* Frame buffer operations */ 801/* Frame buffer operations */
@@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = {
829 .fb_fillrect = s3fb_fillrect, 812 .fb_fillrect = s3fb_fillrect,
830 .fb_copyarea = cfb_copyarea, 813 .fb_copyarea = cfb_copyarea,
831 .fb_imageblit = s3fb_imageblit, 814 .fb_imageblit = s3fb_imageblit,
832 .fb_get_caps = s3fb_get_caps, 815 .fb_get_caps = svga_get_caps,
833}; 816};
834 817
835/* ------------------------------------------------------------------------- */ 818/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 079cdc911e48..25df928d37d8 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info)
347 return 256; 347 return 256;
348} 348}
349 349
350/* Get capabilities of accelerator based on the mode */
351
352void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
353 struct fb_var_screeninfo *var)
354{
355 if (var->bits_per_pixel == 0) {
356 /* can only support 256 8x16 bitmap */
357 caps->x = 1 << (8 - 1);
358 caps->y = 1 << (16 - 1);
359 caps->len = 256;
360 } else {
361 caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
362 caps->y = ~(u32)0;
363 caps->len = ~(u32)0;
364 }
365}
366EXPORT_SYMBOL(svga_get_caps);
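/* The caps fields are bitmaps of supported blit sizes: bit (n - 1) set
 * means n-pixel wide (or tall) blits are supported, so 1 << (8 - 1)
 * allows width 8 only, while ~(u32)0 allows any width up to 32. In the
 * 4 bpp case only 8-pixel wide blits are advertised, matching the
 * 8-pixel alignment the planar blitters assume. */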
350 367
351/* ------------------------------------------------------------------------- */ 368/* ------------------------------------------------------------------------- */
352 369
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
new file mode 100644
index 000000000000..5e9755e464a1
--- /dev/null
+++ b/drivers/video/vt8623fb.c
@@ -0,0 +1,927 @@
1/*
2 * linux/drivers/video/vt8623fb.c - fbdev driver for
3 * integrated graphic core in VIA VT8623 [CLE266] chipset
4 *
5 * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb, some parts are from David Boucher's viafb
12 * (http://davesdomain.org.uk/viafb/)
13 */
14
15#include <linux/version.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/tty.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/fb.h>
25#include <linux/svga.h>
26#include <linux/init.h>
27#include <linux/pci.h>
 28#include <linux/console.h> /* for acquire_console_sem() and release_console_sem() */
29#include <video/vga.h>
30
31#ifdef CONFIG_MTRR
32#include <asm/mtrr.h>
33#endif
34
35struct vt8623fb_info {
36 char __iomem *mmio_base;
37 int mtrr_reg;
38 struct vgastate state;
39 struct mutex open_lock;
40 unsigned int ref_count;
41 u32 pseudo_palette[16];
42};
43
44
45
46/* ------------------------------------------------------------------------- */
47
48static const struct svga_fb_format vt8623fb_formats[] = {
49 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
50 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
51 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
52 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
53 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
54 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
55 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
56 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
57/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
58 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
59 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
60 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
61 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
62 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
63 SVGA_FORMAT_END
64};
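/* Each entry above follows struct svga_fb_format: bits_per_pixel, the
 * red/green/blue/transp bitfields ({offset, length, msb_right}), the
 * nonstd flag, then the fix part -- FB_TYPE_*, type_aux, FB_VISUAL_*
 * and the xpanstep/xresstep alignment steps. E.g. the 16 bpp entry is
 * 5/6/5 truecolor that pans and sizes in 4-pixel steps. */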
65
66static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
67 60000, 300000, 14318};
68
69/* CRT timing register sets */
70
71struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
72struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
73struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
74struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
75struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
76struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
77
78struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
79struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
80struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
81struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
82struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
83struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
84
85struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
86struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
87struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
88struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
89
90struct svga_timing_regs vt8623_timing_regs = {
91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
94 vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
95};
96
97
98/* ------------------------------------------------------------------------- */
99
100
101/* Module parameters */
102
103static char *mode = "640x480-8@60";
104
105#ifdef CONFIG_MTRR
106static int mtrr = 1;
107#endif
108
109MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
110MODULE_LICENSE("GPL");
111MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
112
113module_param(mode, charp, 0644);
114MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
115
116#ifdef CONFIG_MTRR
117module_param(mtrr, int, 0444);
118MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
119#endif
120
121
122/* ------------------------------------------------------------------------- */
123
124
125static struct fb_tile_ops vt8623fb_tile_ops = {
126 .fb_settile = svga_settile,
127 .fb_tilecopy = svga_tilecopy,
128 .fb_tilefill = svga_tilefill,
129 .fb_tileblit = svga_tileblit,
130 .fb_tilecursor = svga_tilecursor,
131 .fb_get_tilemax = svga_get_tilemax,
132};
133
134
135/* ------------------------------------------------------------------------- */
136
137
138/* image data is MSB-first, fb structure is MSB-first too */
139static inline u32 expand_color(u32 c)
140{
141 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
142}
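/* E.g. expand_color(0x5) sets bits 0 and 16, and the multiply by 0xFF
 * smears each into a full byte, giving 0x00FF00FF -- one byte (eight
 * pixels of one plane) per bit of the 4-bit color index. */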
143
144/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
145static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
146{
147 u32 fg = expand_color(image->fg_color);
148 u32 bg = expand_color(image->bg_color);
149 const u8 *src1, *src;
150 u8 __iomem *dst1;
151 u32 __iomem *dst;
152 u32 val;
153 int x, y;
154
155 src1 = image->data;
156 dst1 = info->screen_base + (image->dy * info->fix.line_length)
157 + ((image->dx / 8) * 4);
158
159 for (y = 0; y < image->height; y++) {
160 src = src1;
161 dst = (u32 __iomem *) dst1;
162 for (x = 0; x < image->width; x += 8) {
163 val = *(src++) * 0x01010101;
164 val = (val & fg) | (~val & bg);
165 fb_writel(val, dst++);
166 }
167 src1 += image->width / 8;
168 dst1 += info->fix.line_length;
169 }
170}
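/* The multiply by 0x01010101 above replicates one byte of mono source
 * data into all four plane bytes, so a single mask selects between the
 * expanded fg and bg colors for every plane of an 8-pixel group at
 * once. */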
171
172/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
173static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
174{
175 u32 fg = expand_color(rect->color);
176 u8 __iomem *dst1;
177 u32 __iomem *dst;
178 int x, y;
179
180 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
181 + ((rect->dx / 8) * 4);
182
183 for (y = 0; y < rect->height; y++) {
184 dst = (u32 __iomem *) dst1;
185 for (x = 0; x < rect->width; x += 8) {
186 fb_writel(fg, dst++);
187 }
188 dst1 += info->fix.line_length;
189 }
190}
191
192
193/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
194static inline u32 expand_pixel(u32 c)
195{
196 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
197 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
198}
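/* E.g. expand_pixel(0x80) -- only the leftmost of the 8 mono pixels
 * set -- moves bit 7 to position 4 and the multiply by 0xF smears it
 * into a nibble, yielding 0x000000F0: the first pixel lands in the
 * high nibble of the lowest byte, as the layout comment above says. */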
199
200/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
201static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
202{
203 u32 fg = image->fg_color * 0x11111111;
204 u32 bg = image->bg_color * 0x11111111;
205 const u8 *src1, *src;
206 u8 __iomem *dst1;
207 u32 __iomem *dst;
208 u32 val;
209 int x, y;
210
211 src1 = image->data;
212 dst1 = info->screen_base + (image->dy * info->fix.line_length)
213 + ((image->dx / 8) * 4);
214
215 for (y = 0; y < image->height; y++) {
216 src = src1;
217 dst = (u32 __iomem *) dst1;
218 for (x = 0; x < image->width; x += 8) {
219 val = expand_pixel(*(src++));
220 val = (val & fg) | (~val & bg);
221 fb_writel(val, dst++);
222 }
223 src1 += image->width / 8;
224 dst1 += info->fix.line_length;
225 }
226}
227
228static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
229{
230 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
231 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
232 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
233 vt8623fb_iplan_imageblit(info, image);
234 else
235 vt8623fb_cfb4_imageblit(info, image);
236 } else
237 cfb_imageblit(info, image);
238}
239
240static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
241{
242 if ((info->var.bits_per_pixel == 4)
243 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
244 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
245 vt8623fb_iplan_fillrect(info, rect);
246 else
247 cfb_fillrect(info, rect);
248}
249
250
251/* ------------------------------------------------------------------------- */
252
253
254static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
255{
256 u16 m, n, r;
257 u8 regval;
258 int rv;
259
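	/* var.pixclock is a period in picoseconds, so 10^9 / pixclock is
	   the wanted frequency in kHz, the unit svga_compute_pll() works
	   in; m, n and r come back as multiplier, divider and post-divider
	   (presumably f = f_base * m / (n << r) with f_base = 14318 kHz,
	   the last member of vt8623_pll above). */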
260 rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
261 if (rv < 0) {
262 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
263 return;
264 }
265
266 /* Set VGA misc register */
267 regval = vga_r(NULL, VGA_MIS_R);
268 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
269
270 /* Set clock registers */
271 vga_wseq(NULL, 0x46, (n | (r << 6)));
272 vga_wseq(NULL, 0x47, m);
273
274 udelay(1000);
275
276 /* PLL reset */
277 svga_wseq_mask(0x40, 0x02, 0x02);
278 svga_wseq_mask(0x40, 0x00, 0x02);
279}
280
281
282static int vt8623fb_open(struct fb_info *info, int user)
283{
284 struct vt8623fb_info *par = info->par;
285
286 mutex_lock(&(par->open_lock));
287 if (par->ref_count == 0) {
288 memset(&(par->state), 0, sizeof(struct vgastate));
289 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
290 par->state.num_crtc = 0xA2;
291 par->state.num_seq = 0x50;
292 save_vga(&(par->state));
293 }
294
295 par->ref_count++;
296 mutex_unlock(&(par->open_lock));
297
298 return 0;
299}
300
301static int vt8623fb_release(struct fb_info *info, int user)
302{
303 struct vt8623fb_info *par = info->par;
304
305 mutex_lock(&(par->open_lock));
306 if (par->ref_count == 0) {
307 mutex_unlock(&(par->open_lock));
308 return -EINVAL;
309 }
310
311 if (par->ref_count == 1)
312 restore_vga(&(par->state));
313
314 par->ref_count--;
315 mutex_unlock(&(par->open_lock));
316
317 return 0;
318}
319
320static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
321{
322 int rv, mem, step;
323
324 /* Find appropriate format */
325 rv = svga_match_format (vt8623fb_formats, var, NULL);
326 if (rv < 0)
327 {
328 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
329 return rv;
330 }
331
332	/* Do not allow the real resolution to be larger than the virtual one */
333 if (var->xres > var->xres_virtual)
334 var->xres_virtual = var->xres;
335
336 if (var->yres > var->yres_virtual)
337 var->yres_virtual = var->yres;
338
339 /* Round up xres_virtual to have proper alignment of lines */
340 step = vt8623fb_formats[rv].xresstep - 1;
341 var->xres_virtual = (var->xres_virtual+step) & ~step;
342
343	/* Check whether there is enough memory for the mode */
344 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
345 if (mem > info->screen_size)
346 {
347		printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
348 return -EINVAL;
349 }
350
351 /* Text mode is limited to 256 kB of memory */
352 if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
353 {
354 printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
355 return -EINVAL;
356 }
357
358 rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
359 if (rv < 0)
360 {
361 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
362 return rv;
363 }
364
365 /* Interlaced mode not supported */
366 if (var->vmode & FB_VMODE_INTERLACED)
367 return -EINVAL;
368
369 return 0;
370}
371
372
373static int vt8623fb_set_par(struct fb_info *info)
374{
375 u32 mode, offset_value, fetch_value, screen_size;
376 u32 bpp = info->var.bits_per_pixel;
377
378 if (bpp != 0) {
379 info->fix.ypanstep = 1;
380 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
381
382 info->flags &= ~FBINFO_MISC_TILEBLITTING;
383 info->tileops = NULL;
384
385		/* 4 bpp supports only 8-pixel wide blits, any width otherwise */
386 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
387 info->pixmap.blit_y = ~(u32)0;
388
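		/* Presumably the CRT offset register counts the scanline
		   pitch in 8-byte units (hence bits / 64) and the fetch
		   count is in 16-byte units plus a small safety margin --
		   conventions inherited from the svgalib helpers this
		   driver builds on. */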
389 offset_value = (info->var.xres_virtual * bpp) / 64;
390 fetch_value = ((info->var.xres * bpp) / 128) + 4;
391
392 if (bpp == 4)
393			fetch_value  = (info->var.xres / 8) + 8; /* the + 8 margin is not strictly needed; + 0 works too */
394
395 screen_size = info->var.yres_virtual * info->fix.line_length;
396 } else {
397 info->fix.ypanstep = 16;
398 info->fix.line_length = 0;
399
400 info->flags |= FBINFO_MISC_TILEBLITTING;
401 info->tileops = &vt8623fb_tile_ops;
402
403 /* supports 8x16 tiles only */
404 info->pixmap.blit_x = 1 << (8 - 1);
405 info->pixmap.blit_y = 1 << (16 - 1);
406
407 offset_value = info->var.xres_virtual / 16;
408 fetch_value = (info->var.xres / 8) + 8;
409 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
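		/* The text-mode formula above is (xres/8) * (yres/16)
		   character cells times 2 bytes (character + attribute),
		   i.e. x * y / 64. */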
410 }
411
412 info->var.xoffset = 0;
413 info->var.yoffset = 0;
414 info->var.activate = FB_ACTIVATE_NOW;
415
416 /* Unlock registers */
417 svga_wseq_mask(0x10, 0x01, 0x01);
418 svga_wcrt_mask(0x11, 0x00, 0x80);
419 svga_wcrt_mask(0x47, 0x00, 0x01);
420
421 /* Device, screen and sync off */
422 svga_wseq_mask(0x01, 0x20, 0x20);
423 svga_wcrt_mask(0x36, 0x30, 0x30);
424 svga_wcrt_mask(0x17, 0x00, 0x80);
425
426 /* Set default values */
427 svga_set_default_gfx_regs();
428 svga_set_default_atc_regs();
429 svga_set_default_seq_regs();
430 svga_set_default_crt_regs();
431 svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
432 svga_wcrt_multi(vt8623_start_address_regs, 0);
433
434 svga_wcrt_multi(vt8623_offset_regs, offset_value);
435 svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
436
437 if (info->var.vmode & FB_VMODE_DOUBLE)
438 svga_wcrt_mask(0x09, 0x80, 0x80);
439 else
440 svga_wcrt_mask(0x09, 0x00, 0x80);
441
442 svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
443 svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
444	svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold
445 vga_wseq(NULL, 0x17, 0x1F); // FIFO depth
446 vga_wseq(NULL, 0x18, 0x4E);
447 svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?
448
449 vga_wcrt(NULL, 0x32, 0x00);
450 vga_wcrt(NULL, 0x34, 0x00);
451 vga_wcrt(NULL, 0x6A, 0x80);
452 vga_wcrt(NULL, 0x6A, 0xC0);
453
454 vga_wgfx(NULL, 0x20, 0x00);
455 vga_wgfx(NULL, 0x21, 0x00);
456 vga_wgfx(NULL, 0x22, 0x00);
457
458 /* Set SR15 according to number of bits per pixel */
459 mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
460 switch (mode) {
461 case 0:
462 pr_debug("fb%d: text mode\n", info->node);
463 svga_set_textmode_vga_regs();
464 svga_wseq_mask(0x15, 0x00, 0xFE);
465 svga_wcrt_mask(0x11, 0x60, 0x70);
466 break;
467 case 1:
468 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
469 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
470 svga_wseq_mask(0x15, 0x20, 0xFE);
471 svga_wcrt_mask(0x11, 0x00, 0x70);
472 break;
473 case 2:
474 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
475 svga_wseq_mask(0x15, 0x00, 0xFE);
476 svga_wcrt_mask(0x11, 0x00, 0x70);
477 break;
478 case 3:
479 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
480 svga_wseq_mask(0x15, 0x22, 0xFE);
481 break;
482 case 4:
483 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
484 svga_wseq_mask(0x15, 0xB6, 0xFE);
485 break;
486 case 5:
487 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
488 svga_wseq_mask(0x15, 0xAE, 0xFE);
489 break;
490 default:
491 printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
492 return (-EINVAL);
493 }
494
495 vt8623_set_pixclock(info, info->var.pixclock);
496 svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
497 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
498 1, info->node);
499
500 memset_io(info->screen_base, 0x00, screen_size);
501
502 /* Device and screen back on */
503 svga_wcrt_mask(0x17, 0x80, 0x80);
504 svga_wcrt_mask(0x36, 0x00, 0x30);
505 svga_wseq_mask(0x01, 0x00, 0x20);
506
507 return 0;
508}
509
510
511static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
512 u_int transp, struct fb_info *fb)
513{
514 switch (fb->var.bits_per_pixel) {
515 case 0:
516 case 4:
517 if (regno >= 16)
518 return -EINVAL;
519
520 outb(0x0F, VGA_PEL_MSK);
521 outb(regno, VGA_PEL_IW);
522 outb(red >> 10, VGA_PEL_D);
523 outb(green >> 10, VGA_PEL_D);
524 outb(blue >> 10, VGA_PEL_D);
525 break;
526 case 8:
527 if (regno >= 256)
528 return -EINVAL;
529
530 outb(0xFF, VGA_PEL_MSK);
531 outb(regno, VGA_PEL_IW);
532 outb(red >> 10, VGA_PEL_D);
533 outb(green >> 10, VGA_PEL_D);
534 outb(blue >> 10, VGA_PEL_D);
535 break;
536 case 16:
537 if (regno >= 16)
538 return 0;
539
540 if (fb->var.green.length == 5)
541 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
542 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
543 else if (fb->var.green.length == 6)
544 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
545 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
546 else
547 return -EINVAL;
548 break;
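		/* E.g. pure red (red = 0xFFFF, green = blue = 0) packs to
		   0xF800 in the 5/6/5 case: only the top 5, 6 and 5 bits
		   of each 16-bit component are kept and shifted into
		   place. */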
549 case 24:
550 case 32:
551 if (regno >= 16)
552 return 0;
553
554 /* ((transp & 0xFF00) << 16) */
555 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
556 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
557 break;
558 default:
559 return -EINVAL;
560 }
561
562 return 0;
563}
564
565
566static int vt8623fb_blank(int blank_mode, struct fb_info *info)
567{
568 switch (blank_mode) {
569 case FB_BLANK_UNBLANK:
570 pr_debug("fb%d: unblank\n", info->node);
571 svga_wcrt_mask(0x36, 0x00, 0x30);
572 svga_wseq_mask(0x01, 0x00, 0x20);
573 break;
574 case FB_BLANK_NORMAL:
575 pr_debug("fb%d: blank\n", info->node);
576 svga_wcrt_mask(0x36, 0x00, 0x30);
577 svga_wseq_mask(0x01, 0x20, 0x20);
578 break;
579 case FB_BLANK_HSYNC_SUSPEND:
580 pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
581 svga_wcrt_mask(0x36, 0x10, 0x30);
582 svga_wseq_mask(0x01, 0x20, 0x20);
583 break;
584 case FB_BLANK_VSYNC_SUSPEND:
585 pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
586 svga_wcrt_mask(0x36, 0x20, 0x30);
587 svga_wseq_mask(0x01, 0x20, 0x20);
588 break;
589 case FB_BLANK_POWERDOWN:
590 pr_debug("fb%d: DPMS off (no sync)\n", info->node);
591 svga_wcrt_mask(0x36, 0x30, 0x30);
592 svga_wseq_mask(0x01, 0x20, 0x20);
593 break;
594 }
595
596 return 0;
597}
598
599
600static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
601{
602 unsigned int offset;
603
604 /* Calculate the offset */
605 if (var->bits_per_pixel == 0) {
606 offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
607 offset = offset >> 3;
608 } else {
609 offset = (var->yoffset * info->fix.line_length) +
610 (var->xoffset * var->bits_per_pixel / 8);
611 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
612 }
613
614 /* Set the offset */
615 svga_wcrt_multi(vt8623_start_address_regs, offset);
616
617 return 0;
618}
619
620
621/* ------------------------------------------------------------------------- */
622
623
624/* Frame buffer operations */
625
626static struct fb_ops vt8623fb_ops = {
627 .owner = THIS_MODULE,
628 .fb_open = vt8623fb_open,
629 .fb_release = vt8623fb_release,
630 .fb_check_var = vt8623fb_check_var,
631 .fb_set_par = vt8623fb_set_par,
632 .fb_setcolreg = vt8623fb_setcolreg,
633 .fb_blank = vt8623fb_blank,
634 .fb_pan_display = vt8623fb_pan_display,
635 .fb_fillrect = vt8623fb_fillrect,
636 .fb_copyarea = cfb_copyarea,
637 .fb_imageblit = vt8623fb_imageblit,
638 .fb_get_caps = svga_get_caps,
639};
640
641
642/* PCI probe */
643
644static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
645{
646 struct fb_info *info;
647 struct vt8623fb_info *par;
648 unsigned int memsize1, memsize2;
649 int rc;
650
651 /* Ignore secondary VGA device because there is no VGA arbitration */
652 if (! svga_primary_device(dev)) {
653 dev_info(&(dev->dev), "ignoring secondary device\n");
654 return -ENODEV;
655 }
656
657 /* Allocate and fill driver data structure */
658 info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
659 if (! info) {
660 dev_err(&(dev->dev), "cannot allocate memory\n");
661 return -ENOMEM;
662 }
663
664 par = info->par;
665 mutex_init(&par->open_lock);
666
667 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
668 info->fbops = &vt8623fb_ops;
669
670 /* Prepare PCI device */
671
672 rc = pci_enable_device(dev);
673 if (rc < 0) {
674 dev_err(&(dev->dev), "cannot enable PCI device\n");
675 goto err_enable_device;
676 }
677
678 rc = pci_request_regions(dev, "vt8623fb");
679 if (rc < 0) {
680 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
681 goto err_request_regions;
682 }
683
684 info->fix.smem_start = pci_resource_start(dev, 0);
685 info->fix.smem_len = pci_resource_len(dev, 0);
686 info->fix.mmio_start = pci_resource_start(dev, 1);
687 info->fix.mmio_len = pci_resource_len(dev, 1);
688
689 /* Map physical IO memory address into kernel space */
690 info->screen_base = pci_iomap(dev, 0, 0);
691 if (! info->screen_base) {
692 rc = -ENOMEM;
693 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
694 goto err_iomap_1;
695 }
696
697 par->mmio_base = pci_iomap(dev, 1, 0);
698 if (! par->mmio_base) {
699 rc = -ENOMEM;
700 dev_err(&(dev->dev), "iomap for MMIO failed\n");
701 goto err_iomap_2;
702 }
703
704	/* Find out how much physical memory there is on the card */
705 memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
706 memsize2 = vga_rseq(NULL, 0x39) << 2;
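	/* SR34 and SR39 apparently encode the framebuffer size redundantly
	   in different units, both scaled to megabytes here; they are
	   cross-checked below and the driver falls back to a conservative
	   16 MB when they disagree. */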
707
708 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
709 info->screen_size = memsize1 << 20;
710 else {
711		dev_err(&(dev->dev), "memory size detection failed (%x %x), assuming 16 MB\n", memsize1, memsize2);
712 info->screen_size = 16 << 20;
713 }
714
715 info->fix.smem_len = info->screen_size;
716 strcpy(info->fix.id, "VIA VT8623");
717 info->fix.type = FB_TYPE_PACKED_PIXELS;
718 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
719 info->fix.ypanstep = 0;
720 info->fix.accel = FB_ACCEL_NONE;
721 info->pseudo_palette = (void*)par->pseudo_palette;
722
723 /* Prepare startup mode */
724
725 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
726 if (! ((rc == 1) || (rc == 2))) {
727 rc = -EINVAL;
728 dev_err(&(dev->dev), "mode %s not found\n", mode);
729 goto err_find_mode;
730 }
731
732 rc = fb_alloc_cmap(&info->cmap, 256, 0);
733 if (rc < 0) {
734 dev_err(&(dev->dev), "cannot allocate colormap\n");
735 goto err_alloc_cmap;
736 }
737
738 rc = register_framebuffer(info);
739 if (rc < 0) {
740		dev_err(&(dev->dev), "cannot register framebuffer\n");
741 goto err_reg_fb;
742 }
743
744 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
745 pci_name(dev), info->fix.smem_len >> 20);
746
747 /* Record a reference to the driver data */
748 pci_set_drvdata(dev, info);
749
750#ifdef CONFIG_MTRR
751 if (mtrr) {
752 par->mtrr_reg = -1;
753 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
754 }
755#endif
756
757 return 0;
758
759 /* Error handling */
760err_reg_fb:
761 fb_dealloc_cmap(&info->cmap);
762err_alloc_cmap:
763err_find_mode:
764 pci_iounmap(dev, par->mmio_base);
765err_iomap_2:
766 pci_iounmap(dev, info->screen_base);
767err_iomap_1:
768 pci_release_regions(dev);
769err_request_regions:
770/* pci_disable_device(dev); */
771err_enable_device:
772 framebuffer_release(info);
773 return rc;
774}
775
776/* PCI remove */
777
778static void __devexit vt8623_pci_remove(struct pci_dev *dev)
779{
780	struct fb_info *info = pci_get_drvdata(dev);
781
782	if (info) {
783		struct vt8623fb_info *par = info->par;
784#ifdef CONFIG_MTRR
785 if (par->mtrr_reg >= 0) {
786 mtrr_del(par->mtrr_reg, 0, 0);
787 par->mtrr_reg = -1;
788 }
789#endif
790
791 unregister_framebuffer(info);
792 fb_dealloc_cmap(&info->cmap);
793
794 pci_iounmap(dev, info->screen_base);
795 pci_iounmap(dev, par->mmio_base);
796 pci_release_regions(dev);
797/* pci_disable_device(dev); */
798
799 pci_set_drvdata(dev, NULL);
800 framebuffer_release(info);
801 }
802}
803
804
805#ifdef CONFIG_PM
806/* PCI suspend */
807
808static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
809{
810 struct fb_info *info = pci_get_drvdata(dev);
811 struct vt8623fb_info *par = info->par;
812
813 dev_info(&(dev->dev), "suspend\n");
814
815 acquire_console_sem();
816 mutex_lock(&(par->open_lock));
817
818 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
819 mutex_unlock(&(par->open_lock));
820 release_console_sem();
821 return 0;
822 }
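	/* PM_EVENT_FREEZE is the pre-hibernation snapshot phase, during
	   which the device must stay up, so it is skipped above just like
	   the case where nobody has the framebuffer open. */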
823
824 fb_set_suspend(info, 1);
825
826 pci_save_state(dev);
827 pci_disable_device(dev);
828 pci_set_power_state(dev, pci_choose_state(dev, state));
829
830 mutex_unlock(&(par->open_lock));
831 release_console_sem();
832
833 return 0;
834}
835
836
837/* PCI resume */
838
839static int vt8623_pci_resume(struct pci_dev* dev)
840{
841 struct fb_info *info = pci_get_drvdata(dev);
842 struct vt8623fb_info *par = info->par;
843
844 dev_info(&(dev->dev), "resume\n");
845
846 acquire_console_sem();
847 mutex_lock(&(par->open_lock));
848
849 if (par->ref_count == 0) {
850 mutex_unlock(&(par->open_lock));
851 release_console_sem();
852 return 0;
853 }
854
855 pci_set_power_state(dev, PCI_D0);
856 pci_restore_state(dev);
857
858 if (pci_enable_device(dev))
859 goto fail;
860
861 pci_set_master(dev);
862
863 vt8623fb_set_par(info);
864 fb_set_suspend(info, 0);
865
866fail:
867	mutex_unlock(&(par->open_lock));
868	release_console_sem();
869
870 return 0;
871}
872#else
873#define vt8623_pci_suspend NULL
874#define vt8623_pci_resume NULL
875#endif /* CONFIG_PM */
876
877/* List of boards that we are trying to support */
878
879static struct pci_device_id vt8623_devices[] __devinitdata = {
880 {PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
881 {0, 0, 0, 0, 0, 0, 0}
882};
883
884MODULE_DEVICE_TABLE(pci, vt8623_devices);
885
886static struct pci_driver vt8623fb_pci_driver = {
887 .name = "vt8623fb",
888 .id_table = vt8623_devices,
889 .probe = vt8623_pci_probe,
890 .remove = __devexit_p(vt8623_pci_remove),
891 .suspend = vt8623_pci_suspend,
892 .resume = vt8623_pci_resume,
893};
894
895/* Cleanup */
896
897static void __exit vt8623fb_cleanup(void)
898{
899 pr_debug("vt8623fb: cleaning up\n");
900 pci_unregister_driver(&vt8623fb_pci_driver);
901}
902
903/* Driver Initialisation */
904
905static int __init vt8623fb_init(void)
906{
907
908#ifndef MODULE
909 char *option = NULL;
910
911 if (fb_get_options("vt8623fb", &option))
912 return -ENODEV;
913
914 if (option && *option)
915 mode = option;
916#endif
917
918 pr_debug("vt8623fb: initializing\n");
919 return pci_register_driver(&vt8623fb_pci_driver);
920}
921
922/* ------------------------------------------------------------------------- */
923
924/* Modularization */
925
926module_init(vt8623fb_init);
927module_exit(vt8623fb_cleanup);