Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/sleep.c             |  8
-rw-r--r--  drivers/base/core.c              |  3
-rw-r--r--  drivers/char/agp/generic.c       |  4
-rw-r--r--  drivers/char/vt.c                |  2
-rw-r--r--  drivers/gpu/drm/drm_stub.c       | 11
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c      |  1
-rw-r--r--  drivers/gpu/drm/via/via_dma.c    | 12
-rw-r--r--  drivers/ide/cs5536.c             |  1
-rw-r--r--  drivers/ide/hpt366.c             | 14
-rw-r--r--  drivers/ide/ide-io.c             | 11
-rw-r--r--  drivers/ide/pmac.c               |  2
-rw-r--r--  drivers/lguest/lg.h              |  3
-rw-r--r--  drivers/lguest/segments.c        | 13
-rw-r--r--  drivers/lguest/x86/core.c        |  9
-rw-r--r--  drivers/md/bitmap.c              |  7
-rw-r--r--  drivers/md/md.c                  | 41
-rw-r--r--  drivers/md/md.h                  | 21
-rw-r--r--  drivers/md/raid5.c               |  7
-rw-r--r--  drivers/virtio/virtio_balloon.c  |  3
19 files changed, 113 insertions(+), 60 deletions(-)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 779e4e500df4..d060e6fd7fd5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -300,9 +300,9 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
 static struct platform_suspend_ops acpi_suspend_ops = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin,
-	.prepare = acpi_pm_prepare,
+	.prepare_late = acpi_pm_prepare,
 	.enter = acpi_suspend_enter,
-	.finish = acpi_pm_finish,
+	.wake = acpi_pm_finish,
 	.end = acpi_pm_end,
 };
 
@@ -328,9 +328,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
 static struct platform_suspend_ops acpi_suspend_ops_old = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin_old,
-	.prepare = acpi_pm_disable_gpes,
+	.prepare_late = acpi_pm_disable_gpes,
 	.enter = acpi_suspend_enter,
-	.finish = acpi_pm_finish,
+	.wake = acpi_pm_finish,
 	.end = acpi_pm_end,
 	.recover = acpi_pm_finish,
 };
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d230ff4b3eec..4aa527b8a913 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -891,7 +891,8 @@ int device_add(struct device *dev)
 	set_dev_node(dev, dev_to_node(parent));
 
 	/* first, register with generic layer. */
-	error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev_name(dev));
+	/* we require the name to be set before, and pass NULL */
+	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
 	if (error)
 		goto Error;
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 10d6cbd7c05e..2224b762b7fb 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
 	int i, ret = -ENOMEM;
 
 	for (i = 0; i < num_pages; i++) {
-		page = alloc_page(GFP_KERNEL | GFP_DMA32);
+		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 		/* agp_free_memory() needs gart address */
 		if (page == NULL)
 			goto out;
@@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
 	struct page * page;
 
-	page = alloc_page(GFP_KERNEL | GFP_DMA32);
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 	if (page == NULL)
 		return NULL;
 
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 2c1d133819b5..08151d4de489 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2274,7 +2274,7 @@ rescan_last_byte:
 				continue; /* nothing to display */
 			}
 			/* Glyph not found */
-			if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
+			if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
 				/* In legacy mode use the glyph we get by a 1:1 mapping.
 				   This would make absolutely no sense with Unicode in mind,
 				   but do this for ASCII characters since a font may lack
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d009661781bc..ef878615c49f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -159,6 +159,9 @@ void drm_master_put(struct drm_master **master)
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
+	if (file_priv->is_master)
+		return 0;
+
 	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
 		return -EINVAL;
 
@@ -169,6 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 	    file_priv->minor->master != file_priv->master) {
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
+		file_priv->is_master = 1;
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -178,10 +182,15 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	if (!file_priv->master)
+	if (!file_priv->is_master)
 		return -EINVAL;
+
+	if (!file_priv->minor->master)
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	drm_master_put(&file_priv->minor->master);
+	file_priv->is_master = 0;
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index bc0c6849360c..022876ae34f0 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -132,6 +132,7 @@ void drm_sysfs_destroy(void)
  */
 static void drm_sysfs_device_release(struct device *dev)
 {
+	memset(dev, 0, sizeof(struct device));
 	return;
 }
 
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 7a339dba6a69..bfb92d283260 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -481,11 +481,13 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
 {
 	int count = 10000000;
 
-	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
+	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+		;
 
-	while (count-- && (VIA_READ(VIA_REG_STATUS) &
+	while (count && (VIA_READ(VIA_REG_STATUS) &
 			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
-			    VIA_3D_ENG_BUSY))) ;
+			    VIA_3D_ENG_BUSY)))
+		--count;
 	return count;
 }
 
@@ -705,7 +707,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 	switch (d_siz->func) {
 	case VIA_CMDBUF_SPACE:
 		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
-		       && count--) {
+		       && --count) {
 			if (!d_siz->wait) {
 				break;
 			}
@@ -717,7 +719,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 		break;
 	case VIA_CMDBUF_LAG:
 		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
-		       && count--) {
+		       && --count) {
 			if (!d_siz->wait) {
 				break;
 			}
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 353a35bbba63..0332a95eefd4 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -236,6 +236,7 @@ static const struct ide_dma_ops cs5536_dma_ops = {
 	.dma_test_irq = ide_dma_test_irq,
 	.dma_lost_irq = ide_dma_lost_irq,
 	.dma_timer_expiry = ide_dma_sff_timer_expiry,
+	.dma_sff_read_status = ide_dma_sff_read_status,
 };
 
 static const struct ide_port_info cs5536_info = {
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index a0eb87f59134..0feb66c720e1 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -3,7 +3,7 @@
  * Portions Copyright (C) 2001 Sun Microsystems, Inc.
  * Portions Copyright (C) 2003 Red Hat Inc
  * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- * Portions Copyright (C) 2005-2008 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
  *
  * Thanks to HighPoint Technologies for their assistance, and hardware.
  * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -114,6 +114,8 @@
  *   the register setting lists into the table indexed by the clock selected
  * - set the correct hwif->ultra_mask for each individual chip
  * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
+ * - stop resetting HPT370's state machine before each DMA transfer as that has
+ *   caused more harm than good
  *	Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
  */
 
@@ -133,7 +135,7 @@
 #define DRV_NAME "hpt366"
 
 /* various tuning parameters */
-#define HPT_RESET_STATE_ENGINE
+#undef HPT_RESET_STATE_ENGINE
 #undef HPT_DELAY_INTERRUPT
 
 static const char *quirk_drives[] = {
@@ -808,7 +810,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
 	/* get DMA command mode */
 	dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
 	/* stop DMA */
-	outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
+	outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
 	hpt370_clear_engine(drive);
 }
 
@@ -825,11 +827,11 @@ static int hpt370_dma_end(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
 
-	if (dma_stat & 0x01) {
+	if (dma_stat & ATA_DMA_ACTIVE) {
 		/* wait a little */
 		udelay(20);
 		dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
-		if (dma_stat & 0x01)
+		if (dma_stat & ATA_DMA_ACTIVE)
 			hpt370_irq_timeout(drive);
 	}
 	return ide_dma_end(drive);
@@ -851,7 +853,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
 
 	dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
 	/* return 1 if INTR asserted */
-	if (dma_stat & 4)
+	if (dma_stat & ATA_DMA_INTR)
 		return 1;
 
 	return 0;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 2ae02b8d7f8e..35dc38d3b2c5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -102,11 +102,14 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 		drive->dev_flags |= IDE_DFLAG_PARKED;
 	}
 
-	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
-		memcpy(rq->special, cmd, sizeof(*cmd));
+	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+		struct ide_cmd *orig_cmd = rq->special;
 
-	if (cmd->tf_flags & IDE_TFLAG_DYN)
-		kfree(cmd);
+		if (cmd->tf_flags & IDE_TFLAG_DYN)
+			kfree(orig_cmd);
+		else
+			memcpy(orig_cmd, cmd, sizeof(*cmd));
+	}
 }
 
 /* obsolete, blk_rq_bytes() should be used instead */
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 052b9bf1f8fb..f76e4e6b408f 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1682,7 +1682,7 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
 	 * The +2 is +1 for the stop command and +1 to allow for
 	 * aligning the start address to a multiple of 16 bytes.
 	 */
-	pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent(
+	pmif->dma_table_cpu = pci_alloc_consistent(
 		dev,
 		(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 		&hwif->dmatable_dma);
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8a4a3741b8..af92a176697f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -158,7 +158,8 @@ void free_interrupts(void);
 /* segments.c: */
 void setup_default_gdt_entries(struct lguest_ro_state *state);
 void setup_guest_gdt(struct lg_cpu *cpu);
-void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num);
+void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i,
+			  u32 low, u32 hi);
 void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
 void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
 void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 4f15439b7f12..7ede64ffeef9 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -144,18 +144,19 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
 		gdt[i] = cpu->arch.gdt[i];
 }
 
-/*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
- * We copy it from the Guest and tweak the entries. */
-void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
+/*H:620 This is where the Guest asks us to load a new GDT entry
+ * (LHCALL_LOAD_GDT_ENTRY).  We tweak the entry and copy it in. */
+void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
 {
 	/* We assume the Guest has the same number of GDT entries as the
 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
 	if (num > ARRAY_SIZE(cpu->arch.gdt))
 		kill_guest(cpu, "too many gdt entries %i", num);
 
-	/* We read the whole thing in, then fix it up. */
-	__lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
-	fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
+	/* Set it up, then fix it. */
+	cpu->arch.gdt[num].a = lo;
+	cpu->arch.gdt[num].b = hi;
+	fixup_gdt_table(cpu, num, num+1);
 	/* Mark that the GDT changed so the core knows it has to copy it again,
 	 * even if the Guest is run on the same CPU. */
 	cpu->changed |= CHANGED_GDT;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index a6b717644be0..1a83910f674f 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -324,6 +324,11 @@ static void rewrite_hypercall(struct lg_cpu *cpu)
 	u8 insn[3] = {0xcd, 0x1f, 0x90};
 
 	__lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
+	/* The above write might have caused a copy of that page to be made
+	 * (if it was read-only).  We need to make sure the Guest has
+	 * up-to-date pagetables.  As this doesn't happen often, we can just
+	 * drop them all. */
+	guest_pagetable_clear_all(cpu);
 }
 
 static bool is_hypercall(struct lg_cpu *cpu)
@@ -563,8 +568,8 @@ void __exit lguest_arch_host_fini(void)
 int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 {
 	switch (args->arg0) {
-	case LHCALL_LOAD_GDT:
-		load_guest_gdt(cpu, args->arg1, args->arg2);
+	case LHCALL_LOAD_GDT_ENTRY:
+		load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
 		break;
 	case LHCALL_LOAD_IDT_ENTRY:
 		load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f8a9f7ab2cb8..1fb91edc7de2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1479,6 +1479,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
 		s += blocks;
 	}
 	bitmap->last_end_sync = jiffies;
+	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
 }
 
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
@@ -1589,7 +1590,7 @@ void bitmap_destroy(mddev_t *mddev)
 int bitmap_create(mddev_t *mddev)
 {
 	struct bitmap *bitmap;
-	unsigned long blocks = mddev->resync_max_sectors;
+	sector_t blocks = mddev->resync_max_sectors;
 	unsigned long chunks;
 	unsigned long pages;
 	struct file *file = mddev->bitmap_file;
@@ -1631,8 +1632,8 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->chunkshift = ffz(~bitmap->chunksize);
 
 	/* now that chunksize and chunkshift are set, we can use these macros */
-	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
-			CHUNK_BLOCK_RATIO(bitmap);
+	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
+			CHUNK_BLOCK_SHIFT(bitmap);
 	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
 
 	BUG_ON(!pages);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ed5727c089a9..612343fdde94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2017,6 +2017,8 @@ repeat:
 	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 	spin_unlock_irq(&mddev->write_lock);
 	wake_up(&mddev->sb_wait);
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
 }
 
@@ -2086,6 +2088,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 	 *  -writemostly - clears write_mostly
 	 *  blocked - sets the Blocked flag
 	 *  -blocked - clears the Blocked flag
+	 *  insync - sets Insync providing device isn't active
 	 */
 	int err = -EINVAL;
 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2118,6 +2121,9 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 			md_wakeup_thread(rdev->mddev->thread);
 
 		err = 0;
+	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
+		set_bit(In_sync, &rdev->flags);
+		err = 0;
 	}
 	if (!err && rdev->sysfs_state)
 		sysfs_notify_dirent(rdev->sysfs_state);
@@ -2190,7 +2196,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 	} else if (rdev->mddev->pers) {
 		mdk_rdev_t *rdev2;
 		/* Activating a spare .. or possibly reactivating
-		 * if we every get bitmaps working here.
+		 * if we ever get bitmaps working here.
 		 */
 
 		if (rdev->raid_disk != -1)
@@ -3482,12 +3488,15 @@ sync_completed_show(mddev_t *mddev, char *page)
 {
 	unsigned long max_sectors, resync;
 
+	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		return sprintf(page, "none\n");
+
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		max_sectors = mddev->resync_max_sectors;
 	else
 		max_sectors = mddev->dev_sectors;
 
-	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+	resync = mddev->curr_resync_completed;
 	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
 }
 
@@ -6334,18 +6343,13 @@ void md_do_sync(mddev_t *mddev)
 		sector_t sectors;
 
 		skipped = 0;
-		if (j >= mddev->resync_max) {
-			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
-			wait_event(mddev->recovery_wait,
-				   mddev->resync_max > j
-				   || kthread_should_stop());
-		}
-		if (kthread_should_stop())
-			goto interrupted;
 
-		if (mddev->curr_resync > mddev->curr_resync_completed &&
-		    (mddev->curr_resync - mddev->curr_resync_completed)
-		    > (max_sectors >> 4)) {
+		if ((mddev->curr_resync > mddev->curr_resync_completed &&
+		     (mddev->curr_resync - mddev->curr_resync_completed)
+		     > (max_sectors >> 4)) ||
+		    (j - mddev->curr_resync_completed)*2
+		    >= mddev->resync_max - mddev->curr_resync_completed
+			) {
 			/* time to update curr_resync_completed */
 			blk_unplug(mddev->queue);
 			wait_event(mddev->recovery_wait,
@@ -6353,7 +6357,17 @@ void md_do_sync(mddev_t *mddev)
 			mddev->curr_resync_completed =
 				mddev->curr_resync;
 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
+
+		if (j >= mddev->resync_max)
+			wait_event(mddev->recovery_wait,
+				   mddev->resync_max > j
+				   || kthread_should_stop());
+
+		if (kthread_should_stop())
+			goto interrupted;
+
 		sectors = mddev->pers->sync_request(mddev, j, &skipped,
 						    currspeed < speed_min(mddev));
 		if (sectors == 0) {
@@ -6461,6 +6475,7 @@ void md_do_sync(mddev_t *mddev)
 
  skip:
 	mddev->curr_resync = 0;
+	mddev->curr_resync_completed = 0;
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e9b7f54c24d6..8227ab909d44 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -12,10 +12,17 @@
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
-#ifndef _MD_K_H
-#define _MD_K_H
+#ifndef _MD_MD_H
+#define _MD_MD_H
 
-#ifdef CONFIG_BLOCK
+#include <linux/blkdev.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
 
 #define MaxSector (~(sector_t)0)
 
@@ -408,10 +415,6 @@ static inline void safe_put_page(struct page *p)
 	if (p) put_page(p);
 }
 
-#endif /* CONFIG_BLOCK */
-#endif
-
-
 extern int register_md_personality(struct mdk_personality *p);
 extern int unregister_md_personality(struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
@@ -434,3 +437,5 @@ extern void md_new_event(mddev_t *mddev);
 extern int md_allow_write(mddev_t *mddev);
 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
+
+#endif /* _MD_MD_H */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3bbc6d647044..4616bc3a6e71 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3845,6 +3845,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes)==0);
 		mddev->reshape_position = conf->reshape_progress;
+		mddev->curr_resync_completed = mddev->curr_resync;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -3854,6 +3855,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		conf->reshape_safe = mddev->reshape_position;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
+		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	}
 
 	if (mddev->delta_disks < 0) {
@@ -3938,11 +3940,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 	 * then we need to write out the superblock.
 	 */
 	sector_nr += reshape_sectors;
-	if (sector_nr >= mddev->resync_max) {
+	if ((sector_nr - mddev->curr_resync_completed) * 2
+	    >= mddev->resync_max - mddev->curr_resync_completed) {
 		/* Cannot proceed until we've updated the superblock... */
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes) == 0);
 		mddev->reshape_position = conf->reshape_progress;
+		mddev->curr_resync_completed = mddev->curr_resync;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -3953,6 +3957,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		conf->reshape_safe = mddev->reshape_position;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
+		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	}
 	return reshape_sectors;
 }
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 59268266b79a..9c76a061a04d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -190,7 +190,8 @@ static int balloon(void *_vballoon)
 		try_to_freeze();
 		wait_event_interruptible(vb->config_change,
 					 (diff = towards_target(vb)) != 0
-					 || kthread_should_stop());
+					 || kthread_should_stop()
+					 || freezing(current));
 		if (diff > 0)
 			fill_balloon(vb, diff);
 		else if (diff < 0)