Diffstat (limited to 'drivers')
-rw-r--r-- drivers/block/cciss.c | 15
-rw-r--r-- drivers/block/cciss_cmd.h | 1
-rw-r--r-- drivers/block/floppy.c | 5
-rw-r--r-- drivers/char/tty_ldisc.c | 15
-rw-r--r-- drivers/clocksource/sh_tmu.c | 2
-rw-r--r-- drivers/edac/edac_core.h | 4
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 4
-rw-r--r-- drivers/edac/mpc85xx_edac.c | 6
-rw-r--r-- drivers/edac/mpc85xx_edac.h | 1
-rw-r--r-- drivers/gpio/pl061.c | 20
-rw-r--r-- drivers/ide/ide-acpi.c | 37
-rw-r--r-- drivers/ide/ide-cd.c | 14
-rw-r--r-- drivers/ide/ide-devsets.c | 2
-rw-r--r-- drivers/ide/ide-eh.c | 2
-rw-r--r-- drivers/ide/ide-floppy.c | 2
-rw-r--r-- drivers/ide/ide-io.c | 14
-rw-r--r-- drivers/ide/ide-ioctls.c | 3
-rw-r--r-- drivers/ide/ide-pm.c | 30
-rw-r--r-- drivers/leds/Kconfig | 14
-rw-r--r-- drivers/leds/Makefile | 1
-rw-r--r-- drivers/leds/leds-alix2.c | 7
-rw-r--r-- drivers/leds/leds-bd2802.c | 96
-rw-r--r-- drivers/leds/leds-gpio.c | 22
-rw-r--r-- drivers/leds/leds-lp3944.c | 466
-rw-r--r-- drivers/leds/leds-pca9532.c | 58
-rw-r--r-- drivers/lguest/lg.h | 2
-rw-r--r-- drivers/lguest/lguest_user.c | 4
-rw-r--r-- drivers/md/dm-exception-store.c | 9
-rw-r--r-- drivers/md/dm-table.c | 2
-rw-r--r-- drivers/md/dm.c | 4
-rw-r--r-- drivers/md/linear.c | 4
-rw-r--r-- drivers/md/md.c | 56
-rw-r--r-- drivers/md/multipath.c | 7
-rw-r--r-- drivers/md/raid0.c | 9
-rw-r--r-- drivers/md/raid1.c | 9
-rw-r--r-- drivers/md/raid10.c | 19
-rw-r--r-- drivers/md/raid5.c | 28
-rw-r--r-- drivers/mmc/host/mmc_spi.c | 6
-rw-r--r-- drivers/mtd/cmdlinepart.c | 2
-rw-r--r-- drivers/mtd/devices/m25p80.c | 2
-rw-r--r-- drivers/mtd/inftlcore.c | 11
-rw-r--r-- drivers/mtd/maps/integrator-flash.c | 22
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 2
-rw-r--r-- drivers/mtd/nand/omap2.c | 7
-rw-r--r-- drivers/mtd/nftlcore.c | 16
-rw-r--r-- drivers/net/atl1c/atl1c_ethtool.c | 2
-rw-r--r-- drivers/net/atl1e/atl1e_ethtool.c | 2
-rw-r--r-- drivers/net/benet/be.h | 2
-rw-r--r-- drivers/net/benet/be_ethtool.c | 4
-rw-r--r-- drivers/net/benet/be_main.c | 45
-rw-r--r-- drivers/net/bnx2x_main.c | 10
-rw-r--r-- drivers/net/cpmac.c | 2
-rw-r--r-- drivers/net/e1000/e1000_main.c | 11
-rw-r--r-- drivers/net/e1000e/netdev.c | 3
-rw-r--r-- drivers/net/fsl_pq_mdio.c | 8
-rw-r--r-- drivers/net/igb/igb_main.c | 14
-rw-r--r-- drivers/net/irda/bfin_sir.c | 16
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethtool.c | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 45
-rw-r--r-- drivers/net/mdio.c | 4
-rw-r--r-- drivers/net/sh_eth.c | 9
-rw-r--r-- drivers/net/sky2.c | 2
-rw-r--r-- drivers/net/usb/cdc_eem.c | 2
-rw-r--r-- drivers/net/usb/dm9601.c | 10
-rw-r--r-- drivers/net/usb/net1080.c | 12
-rw-r--r-- drivers/net/usb/rndis_host.c | 2
-rw-r--r-- drivers/net/usb/smsc95xx.c | 10
-rw-r--r-- drivers/net/usb/usbnet.c | 30
-rw-r--r-- drivers/net/veth.c | 41
-rw-r--r-- drivers/parport/parport_pc.c | 5
-rw-r--r-- drivers/pci/intel-iommu.c | 704
-rw-r--r-- drivers/pci/iova.c | 26
-rw-r--r-- drivers/platform/x86/Kconfig | 3
-rw-r--r-- drivers/rtc/rtc-bfin.c | 30
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 4
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 8
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 7
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 7
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 5
-rw-r--r-- drivers/scsi/sg.c | 4
-rw-r--r-- drivers/scsi/zalon.c | 2
-rw-r--r-- drivers/serial/8250_pci.c | 6
-rw-r--r-- drivers/spi/omap_uwire.c | 2
-rw-r--r-- drivers/spi/spi_bitbang.c | 24
-rw-r--r-- drivers/spi/spidev.c | 17
-rw-r--r-- drivers/video/atafb.c | 7
-rw-r--r-- drivers/video/atmel_lcdfb.c | 2
-rw-r--r-- drivers/video/aty/atyfb.h | 3
-rw-r--r-- drivers/video/aty/atyfb_base.c | 141
-rw-r--r-- drivers/video/aty/mach64_accel.c | 7
-rw-r--r-- drivers/video/backlight/tdo24m.c | 2
-rw-r--r-- drivers/video/fbmem.c | 13
-rw-r--r-- drivers/video/fsl-diu-fb.c | 14
-rw-r--r-- drivers/video/i810/i810_main.c | 2
-rw-r--r-- drivers/video/matrox/matroxfb_base.c | 3
-rw-r--r-- drivers/video/matrox/matroxfb_crtc2.c | 5
-rw-r--r-- drivers/video/mx3fb.c | 17
-rw-r--r-- drivers/video/omap/omapfb_main.c | 4
-rw-r--r-- drivers/video/platinumfb.c | 2
-rw-r--r-- drivers/video/pxafb.c | 2
-rw-r--r-- drivers/video/sh7760fb.c | 19
-rw-r--r-- drivers/video/sh_mobile_lcdcfb.c | 53
-rw-r--r-- drivers/video/sis/sis_main.c | 2
-rw-r--r-- drivers/video/sm501fb.c | 21
-rw-r--r-- drivers/video/w100fb.c | 2
105 files changed, 1720 insertions(+), 808 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c7a527c08a09..65a0655e7fc8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -226,8 +226,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c)
 
 static inline void removeQ(CommandList_struct *c)
 {
-	if (WARN_ON(hlist_unhashed(&c->list)))
+	/*
+	 * After kexec/dump some commands might still
+	 * be in flight, which the firmware will try
+	 * to complete. Resetting the firmware doesn't work
+	 * with old fw revisions, so we have to mark
+	 * them off as 'stale' to prevent the driver from
+	 * falling over.
+	 */
+	if (WARN_ON(hlist_unhashed(&c->list))) {
+		c->cmd_type = CMD_MSG_STALE;
 		return;
+	}
 
 	hlist_del_init(&c->list);
 }
@@ -4246,7 +4256,8 @@ static void fail_all_cmds(unsigned long ctlr)
 	while (!hlist_empty(&h->cmpQ)) {
 		c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
 		removeQ(c);
-		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+		if (c->cmd_type != CMD_MSG_STALE)
+			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
 		if (c->cmd_type == CMD_RWREQ) {
 			complete_command(h, c, 0);
 		} else if (c->cmd_type == CMD_IOCTL_PEND)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd665b00c7c5..dbaed1ea0da3 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct {
 #define CMD_SCSI		0x03
 #define CMD_MSG_DONE		0x04
 #define CMD_MSG_TIMEOUT		0x05
+#define CMD_MSG_STALE		0xff
 
 /* This structure needs to be divisible by 8 for new
  * indexing method.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c90181..91b753013780 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		mutex_lock(&open_lock);
-		LOCK_FDC(drive, 1);
+		if (lock_fdc(drive, 1)) {
+			mutex_unlock(&open_lock);
+			return -EINTR;
+		}
 		floppy_type[type] = *g;
 		floppy_type[type].name = "user format";
 		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b0..913aa8d3f1c5 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
 	tty_ldisc_wait_idle(tty);
 
 	/*
-	 * Shutdown the current line discipline, and reset it to N_TTY.
-	 *
-	 * FIXME: this MUST get fixed for the new reflocking
+	 * Now kill off the ldisc
 	 */
+	tty_ldisc_close(tty, tty->ldisc);
+	tty_ldisc_put(tty->ldisc);
+	/* Force an oops if we mess this up */
+	tty->ldisc = NULL;
+
+	/* Ensure the next open requests the N_TTY ldisc */
+	tty_set_termios_ldisc(tty, N_TTY);
 
-	tty_ldisc_reinit(tty);
 	/* This will need doing differently if we need to lock */
 	if (o_tty)
 		tty_ldisc_release(o_tty, NULL);
+
+	/* And the memory resources remaining (buffers, termios) will be
+	   disposed of when the kref hits zero */
 }
 
 /**
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 9ffb05f4095d..93c2322feab7 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
 	if (periodic)
 		sh_tmu_write(p, TCOR, delta);
 	else
-		sh_tmu_write(p, TCOR, 0);
+		sh_tmu_write(p, TCOR, 0xffffffff);
 
 	sh_tmu_write(p, TCNT, delta);
 
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb820..871c13b4c148 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
 	MEM_FB_DDR2,		/* fully buffered DDR2 */
 	MEM_RDDR2,		/* Registered DDR2 RAM */
 	MEM_XDR,		/* Rambus XDR */
+	MEM_DDR3,		/* DDR3 RAM */
+	MEM_RDDR3,		/* Registered DDR3 RAM */
 };
 
 #define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
 #define MEM_FLAG_FB_DDR2	BIT(MEM_FB_DDR2)
 #define MEM_FLAG_RDDR2		BIT(MEM_RDDR2)
 #define MEM_FLAG_XDR		BIT(MEM_XDR)
+#define MEM_FLAG_DDR3		BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3		BIT(MEM_RDDR3)
 
 /* chipset Error Detection and Correction capabilities and mode */
 enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942d..e1d4ce083481 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
 	[MEM_DDR2] = "Unbuffered-DDR2",
 	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
 	[MEM_RDDR2] = "Registered-DDR2",
-	[MEM_XDR] = "XDR"
+	[MEM_XDR] = "XDR",
+	[MEM_DDR3] = "Unbuffered-DDR3",
+	[MEM_RDDR3] = "Registered-DDR3"
 };
 
 static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916f..3f2ccfc6407c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 	case DSC_SDTYPE_DDR2:
 		mtype = MEM_RDDR2;
 		break;
+	case DSC_SDTYPE_DDR3:
+		mtype = MEM_RDDR3;
+		break;
 	default:
 		mtype = MEM_UNKNOWN;
 		break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 	case DSC_SDTYPE_DDR2:
 		mtype = MEM_DDR2;
 		break;
+	case DSC_SDTYPE_DDR3:
+		mtype = MEM_DDR3;
+		break;
 	default:
 		mtype = MEM_UNKNOWN;
 		break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a030..52432ee7c4b9 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
 
 #define DSC_SDTYPE_DDR		0x02000000
 #define DSC_SDTYPE_DDR2		0x03000000
+#define DSC_SDTYPE_DDR3		0x07000000
 #define DSC_X32_EN		0x00000020
 
 /* Err_Int_En */
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d9..4ee4c8367a3f 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
 	writeb(!!value << offset, chip->base + (1 << (offset + 2)));
 }
 
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+	if (chip->irq_base == (unsigned) -1)
+		return -EINVAL;
+
+	return chip->irq_base + offset;
+}
+
 /*
  * PL061 GPIO IRQ
  */
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
 	desc->chip->ack(irq);
 	list_for_each(ptr, chip_list) {
 		unsigned long pending;
-		int gpio;
+		int offset;
 
 		chip = list_entry(ptr, struct pl061_gpio, list);
 		pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
 		if (pending == 0)
 			continue;
 
-		for_each_bit(gpio, &pending, PL061_GPIO_NR)
-			generic_handle_irq(gpio_to_irq(gpio));
+		for_each_bit(offset, &pending, PL061_GPIO_NR)
+			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
 	}
 	desc->chip->unmask(irq);
 }
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
 	struct pl061_gpio *chip;
 	struct list_head *chip_list;
 	int ret, irq, i;
-	static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+	static DECLARE_BITMAP(init_irq, NR_IRQS);
 
 	pdata = dev->dev.platform_data;
 	if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
 	chip->gc.direction_output = pl061_direction_output;
 	chip->gc.get = pl061_get_value;
 	chip->gc.set = pl061_set_value;
+	chip->gc.to_irq = pl061_to_irq;
 	chip->gc.base = pdata->gpio_base;
 	chip->gc.ngpio = PL061_GPIO_NR;
 	chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
 	if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
 		chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
 		if (chip_list == NULL) {
+			clear_bit(irq, init_irq);
 			ret = -ENOMEM;
 			goto iounmap;
 		}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b264..c509c9916464 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
 	return 0;
 }
 
+bool ide_port_acpi(ide_hwif_t *hwif)
+{
+	return ide_noacpi == 0 && hwif->acpidata;
+}
+
 /**
  * ide_get_dev_handle - finds acpi_handle and PCI device.function
  * @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
 	unsigned long gtf_address;
 	unsigned long obj_loc;
 
-	if (ide_noacpi)
-		return 0;
-
 	DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
 
 	ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
 	struct acpi_buffer output;
 	union acpi_object *out_obj;
 
-	if (ide_noacpi)
-		return;
-
-	DEBPRINT("ENTER:\n");
-
-	if (!hwif->acpidata) {
-		DEBPRINT("no ACPI data for %s\n", hwif->name);
-		return;
-	}
-
 	/* Setting up output buffer for _GTM */
 	output.length = ACPI_ALLOCATE_BUFFER;
 	output.pointer = NULL;	/* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
 	struct ide_acpi_drive_link *master = &hwif->acpidata->master;
 	struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
 
-	if (ide_noacpi)
-		return;
-
-	DEBPRINT("ENTER:\n");
-
-	if (!hwif->acpidata) {
-		DEBPRINT("no ACPI data for %s\n", hwif->name);
-		return;
-	}
-
 	/* Give the GTM buffer + drive Identify data to the channel via the
 	 * _STM method: */
 	/* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
 	ide_drive_t *drive;
 	int i;
 
-	if (ide_noacpi || ide_noacpi_psx)
+	if (ide_noacpi_psx)
 		return;
 
 	DEBPRINT("ENTER:\n");
 
-	if (!hwif->acpidata) {
-		DEBPRINT("no ACPI data for %s\n", hwif->name);
-		return;
-	}
-
 	/* channel first and then drives for power on and verse versa for power off */
 	if (on)
 		acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
 			   drive->name, err);
 	}
 
-	if (!ide_acpionboot) {
+	if (ide_noacpi || ide_acpionboot == 0) {
 		DEBPRINT("ACPI methods disabled on boot\n");
 		return;
 	}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index f0ede5953af8..6a9a769bffc1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 		}
 	} else if (!blk_pc_request(rq)) {
 		ide_cd_request_sense_fixup(drive, cmd);
-		/* complain if we still have data left to transfer */
+
 		uptodate = cmd->nleft ? 0 : 1;
-		if (uptodate == 0)
+
+		/*
+		 * suck out the remaining bytes from the drive in an
+		 * attempt to complete the data xfer. (see BZ#13399)
+		 */
+		if (!(stat & ATA_ERR) && !uptodate && thislen) {
+			ide_pio_bytes(drive, cmd, write, thislen);
+			uptodate = cmd->nleft ? 0 : 1;
+		}
+
+		if (!uptodate)
 			rq->cmd_flags |= REQ_FAILED;
 	}
 	goto out_end;
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d5..1099bf7cf968 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
 		err = setfunc(drive, *(int *)&rq->cmd[1]);
 	if (err)
 		rq->errors = err;
-	ide_complete_rq(drive, err, ide_rq_bytes(rq));
+	ide_complete_rq(drive, err, blk_rq_bytes(rq));
 	return ide_stopped;
 }
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b9141979613..e9abf2c3c335 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 	if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
 		if (err <= 0 && rq->errors == 0)
 			rq->errors = -EIO;
-		ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq));
+		ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
 	}
 }
 
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d73..fefbdfc8db06 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
 	drive->failed_pc = NULL;
 	if (blk_fs_request(rq) == 0 && rq->errors == 0)
 		rq->errors = -EIO;
-	ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 	return ide_stopped;
 }
 
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 93b7886a2d6e..d5f3c77beadd 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 	}
 }
 
-/* obsolete, blk_rq_bytes() should be used instead */
-unsigned int ide_rq_bytes(struct request *rq)
-{
-	if (blk_pc_request(rq))
-		return blk_rq_bytes(rq);
-	else
-		return blk_rq_cur_sectors(rq) << 9;
-}
-EXPORT_SYMBOL_GPL(ide_rq_bytes);
-
 int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 
 	if ((media == ide_floppy || media == ide_tape) && drv_req) {
 		rq->errors = 0;
-		ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 	} else {
 		if (media == ide_tape)
 			rq->errors = IDE_DRV_ERROR_GENERAL;
 		else if (blk_fs_request(rq) == 0 && rq->errors == 0)
 			rq->errors = -EIO;
-		ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
 	}
+
+	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6e..e246d3d3fbcc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
 		goto out;
 	}
 
-	id = kmalloc(size, GFP_KERNEL);
+	/* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
+	id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
 	if (id == NULL) {
 		rc = -ENOMEM;
 		goto out;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cffe..ad7be2669dcb 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	struct request_pm_state rqpm;
 	int ret;
 
-	/* call ACPI _GTM only once */
-	if ((drive->dn & 1) == 0 || pair == NULL)
-		ide_acpi_get_timing(hwif);
+	if (ide_port_acpi(hwif)) {
+		/* call ACPI _GTM only once */
+		if ((drive->dn & 1) == 0 || pair == NULL)
+			ide_acpi_get_timing(hwif);
+	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
 	blk_put_request(rq);
 
-	/* call ACPI _PS3 only after both devices are suspended */
-	if (ret == 0 && ((drive->dn & 1) || pair == NULL))
-		ide_acpi_set_state(hwif, 0);
+	if (ret == 0 && ide_port_acpi(hwif)) {
+		/* call ACPI _PS3 only after both devices are suspended */
+		if ((drive->dn & 1) || pair == NULL)
+			ide_acpi_set_state(hwif, 0);
+	}
 
 	return ret;
 }
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
 	struct request_pm_state rqpm;
 	int err;
 
-	/* call ACPI _PS0 / _STM only once */
-	if ((drive->dn & 1) == 0 || pair == NULL) {
-		ide_acpi_set_state(hwif, 1);
-		ide_acpi_push_timing(hwif);
-	}
+	if (ide_port_acpi(hwif)) {
+		/* call ACPI _PS0 / _STM only once */
+		if ((drive->dn & 1) == 0 || pair == NULL) {
+			ide_acpi_set_state(hwif, 1);
+			ide_acpi_push_timing(hwif);
+		}
 
-	ide_acpi_exec_tfs(drive);
+		ide_acpi_exec_tfs(drive);
+	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d9..7c8e7122aaa9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
 	depends on LEDS_CLASS && X86 && EXPERIMENTAL
 	help
 	  This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+	  You have to set leds-alix2.force=1 for boards with Award BIOS.
 
 config LEDS_H1940
 	tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
 	  of_platform devices.  For instance, LEDs which are listed in a "dts"
 	  file.
 
-config LEDS_LP5521
-	tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+	tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
 	depends on LEDS_CLASS && I2C
 	help
-	  If you say 'Y' here you get support for the National Semiconductor
-	  LP5521 LED driver used in n8x0 boards.
+	  This option enables support for LEDs connected to the National
+	  Semiconductor LP3944 Lighting Management Unit (LMU) also known as
+	  Fun Light Chip.
 
-	  This driver can be built as a module by choosing 'M'. The module
-	  will be called leds-lp5521.
+	  To compile this driver as a module, choose M here: the
+	  module will be called leds-lp3944.
 
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92f..e8cdcf77a4c3 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
 obj-$(CONFIG_LEDS_SUNFIRE)		+= leds-sunfire.o
 obj-$(CONFIG_LEDS_PCA9532)		+= leds-pca9532.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944)		+= leds-lp3944.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)		+= leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc8..731d4eef3425 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
 
 static int force = 0;
 module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
 
 struct alix_led {
 	struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
 		goto out;
 	}
 
+	/* enable output on GPIO for LED 1,2,3 */
+	outl(1 << 6, 0x6104);
+	outl(1 << 9, 0x6184);
+	outl(1 << 11, 0x6184);
+
 	pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
 	if (!IS_ERR(pdev)) {
 		ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b2..779d7f262c04 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
 	enum led_ids			led_id;
 	enum led_colors			color;
 	enum led_bits			state;
+
+	/* General attributes of RGB LEDs */
+	int				wave_pattern;
+	int				rgb_current;
 };
 
 
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
 	bd2802_reset_cancel(led);
 
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
-	bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+	bd2802_write_byte(led->client, reg, led->rgb_current);
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
 	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
 	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
-	bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+	bd2802_write_byte(led->client, reg, led->rgb_current);
 	reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
-	bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+	bd2802_write_byte(led->client, reg, led->wave_pattern);
 
 	bd2802_enable(led, id);
 	bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
 		ret = device_create_file(&led->client->dev,
 						bd2802_addr_attributes[i]);
 		if (ret) {
-			dev_err(&led->client->dev, "failed to sysfs file %s\n",
+			dev_err(&led->client->dev, "failed: sysfs file %s\n",
 					bd2802_addr_attributes[i]->attr.name);
 			goto failed_remove_files;
 		}
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
 	.store = bd2802_store_adv_conf,
 };
 
+#define BD2802_CONTROL_ATTR(attr_name, name_str)			\
+static ssize_t bd2802_show_##attr_name(struct device *dev,		\
+	struct device_attribute *attr, char *buf)			\
+{									\
+	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+	ssize_t ret;							\
+	down_read(&led->rwsem);						\
+	ret = sprintf(buf, "0x%02x\n", led->attr_name);			\
+	up_read(&led->rwsem);						\
+	return ret;							\
+}									\
+static ssize_t bd2802_store_##attr_name(struct device *dev,		\
+	struct device_attribute *attr, const char *buf, size_t count)	\
+{									\
+	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+	unsigned long val;						\
+	int ret;							\
+	if (!count)							\
+		return -EINVAL;						\
+	ret = strict_strtoul(buf, 16, &val);				\
+	if (ret)							\
+		return ret;						\
+	down_write(&led->rwsem);					\
+	led->attr_name = val;						\
+	up_write(&led->rwsem);						\
+	return count;							\
+}									\
+static struct device_attribute bd2802_##attr_name##_attr = {		\
+	.attr = {							\
+		.name = name_str,					\
+		.mode = 0644,						\
+		.owner = THIS_MODULE					\
+	},								\
+	.show = bd2802_show_##attr_name,				\
+	.store = bd2802_store_##attr_name,				\
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+	&bd2802_adv_conf_attr,
+	&bd2802_wave_pattern_attr,
+	&bd2802_rgb_current_attr,
+};
+
 static void bd2802_led_work(struct work_struct *work)
 {
 	struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
 	led->cdev_led1r.brightness = LED_OFF;
 	led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
 	led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
-	led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
 
 	ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
 	if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
 	led->cdev_led1g.brightness = LED_OFF;
 	led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
 	led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
-	led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
 
 	ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
 	if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
 	led->cdev_led1b.brightness = LED_OFF;
 	led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
 	led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
-	led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
 
 	ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
 	if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
 	led->cdev_led2r.brightness = LED_OFF;
 	led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
 	led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
-	led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
 
 	ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
 	if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
 	led->cdev_led2g.brightness = LED_OFF;
 	led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
 	led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
-	led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
 
 	ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
 	if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
 {
 	struct bd2802_led *led;
 	struct bd2802_led_platform_data *pdata;
-	int ret;
+	int ret, i;
 
 	led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
 	if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
 	/* To save the power, reset BD2802 after detecting */
 	gpio_set_value(led->pdata->reset_gpio, 0);
 
+	/* Default attributes */
+	led->wave_pattern = BD2802_PATTERN_HALF;
+	led->rgb_current = BD2802_CURRENT_032;
+
 	init_rwsem(&led->rwsem);
 
-	ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
-	if (ret) {
-		dev_err(&client->dev, "failed to create sysfs file %s\n",
-				bd2802_adv_conf_attr.attr.name);
-		goto failed_free;
+	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+		ret = device_create_file(&led->client->dev,
+						bd2802_attributes[i]);
+		if (ret) {
+			dev_err(&led->client->dev, "failed: sysfs file %s\n",
+					bd2802_attributes[i]->attr.name);
+			goto failed_unregister_dev_file;
+		}
 	}
 
 	ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
 	return 0;
 
 failed_unregister_dev_file:
-	device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+	for (i--; i >= 0; i--)
+		device_remove_file(&led->client->dev, bd2802_attributes[i]);
 failed_free:
 	i2c_set_clientdata(client, NULL);
 	kfree(led);
@@ -697,12 +750,14 @@ failed_free:
 static int __exit bd2802_remove(struct i2c_client *client)
 {
 	struct bd2802_led *led = i2c_get_clientdata(client);
+	int i;
 
-	bd2802_unregister_led_classdev(led);
 	gpio_set_value(led->pdata->reset_gpio, 0);
+	bd2802_unregister_led_classdev(led);
 	if (led->adf_on)
 		bd2802_disable_adv_conf(led);
-	device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+		device_remove_file(&led->client->dev, bd2802_attributes[i]);
 	i2c_set_clientdata(client, NULL);
 	kfree(led);
 
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
 	struct bd2802_led *led = i2c_get_clientdata(client);
 
 	if (!bd2802_is_all_off(led) || led->adf_on) {
-		gpio_set_value(led->pdata->reset_gpio, 1);
-		udelay(100);
+		bd2802_reset_cancel(led);
 		bd2802_restore_state(led);
 	}
 
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
 
 MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
 MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de85..6b06638eb5b4 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
 		struct gpio_led_data *led_dat, struct device *parent,
 	int (*blink_set)(unsigned, unsigned long *, unsigned long *))
 {
-	int ret;
+	int ret, state;
 
 	/* skip leds that aren't available */
 	if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
 		led_dat->cdev.blink_set = gpio_blink_set;
 	}
 	led_dat->cdev.brightness_set = gpio_led_set;
-	led_dat->cdev.brightness = LED_OFF;
+	if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+		state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+	else
+		state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+	led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
 	if (!template->retain_state_suspended)
 		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
-	ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+	ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
 	if (ret < 0)
 		goto err;
 
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
 }
 
 #ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
 {
 	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
 	struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
 	memset(&led, 0, sizeof(led));
 	for_each_child_of_node(np, child) {
 		enum of_gpio_flags flags;
+		const char *state;
 
 		led.gpio = of_get_gpio_flags(child, 0, &flags);
 		led.active_low = flags & OF_GPIO_ACTIVE_LOW;
 		led.name = of_get_property(child, "label", NULL) ? : child->name;
 		led.default_trigger =
 			of_get_property(child, "linux,default-trigger", NULL);
+		state = of_get_property(child, "default-state", NULL);
+		if (state) {
+			if (!strcmp(state, "keep"))
+				led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+			else if(!strcmp(state, "on"))
+				led.default_state = LEDS_GPIO_DEFSTATE_ON;
+			else
+				led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+		}
 
 		ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
 				      &ofdev->dev, NULL);
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 000000000000..5946208ba26e
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
1/*
2 * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
3 *
4 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12/*
13 * I2C driver for National Semiconductor LP3944 Funlight Chip
14 * http://www.national.com/pf/LP/LP3944.html
15 *
16 * This helper chip can drive up to 8 leds, with two programmable DIM modes;
17 * it could even be used as a gpio expander but this driver assumes it is used
18 * as a led controller.
19 *
20 * The DIM modes are used to set _blink_ patterns for leds, the pattern is
21 * specified supplying two parameters:
22 * - period: from 0s to 1.6s
23 * - duty cycle: percentage of the period the led is on, from 0 to 100
24 *
25 * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb
26 * leds, the camera flash light and the displays backlights.
27 */
28
29#include <linux/module.h>
30#include <linux/i2c.h>
31#include <linux/leds.h>
32#include <linux/mutex.h>
33#include <linux/workqueue.h>
34#include <linux/leds-lp3944.h>
35
36/* Read Only Registers */
37#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
38#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
39
40#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
41#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
42#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
43#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
44#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
45#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
46
47/* These registers are not used to control leds in LP3944, they can store
48 * arbitrary values which the chip will ignore.
49 */
50#define LP3944_REG_REGISTER8 0x08
51#define LP3944_REG_REGISTER9 0x09
52
53#define LP3944_DIM0 0
54#define LP3944_DIM1 1
55
56/* period in ms */
57#define LP3944_PERIOD_MIN 0
58#define LP3944_PERIOD_MAX 1600
59
60/* duty cycle is a percentage */
61#define LP3944_DUTY_CYCLE_MIN 0
62#define LP3944_DUTY_CYCLE_MAX 100
63
64#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
65
66/* Saved data */
67struct lp3944_led_data {
68 u8 id;
69 enum lp3944_type type;
70 enum lp3944_status status;
71 struct led_classdev ldev;
72 struct i2c_client *client;
73 struct work_struct work;
74};
75
76struct lp3944_data {
77 struct mutex lock;
78 struct i2c_client *client;
79 struct lp3944_led_data leds[LP3944_LEDS_MAX];
80};
81
82static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
83{
84 int tmp;
85
86 tmp = i2c_smbus_read_byte_data(client, reg);
87 if (tmp < 0)
88 return -EINVAL;
89
90 *value = tmp;
91
92 return 0;
93}
94
95static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
96{
97 return i2c_smbus_write_byte_data(client, reg, value);
98}
99
100/**
101 * Set the period for DIM status
102 *
103 * @client: the i2c client
104 * @dim: either LP3944_DIM0 or LP3944_DIM1
105 * @period: period of a blink, that is a on/off cycle, expressed in ms.
106 */
107static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
108{
109 u8 psc_reg;
110 u8 psc_value;
111 int err;
112
113 if (dim == LP3944_DIM0)
114 psc_reg = LP3944_REG_PSC0;
115 else if (dim == LP3944_DIM1)
116 psc_reg = LP3944_REG_PSC1;
117 else
118 return -EINVAL;
119
120 /* Convert period to Prescaler value */
121 if (period > LP3944_PERIOD_MAX)
122 return -EINVAL;
123
124 psc_value = (period * 255) / LP3944_PERIOD_MAX;
125
126 err = lp3944_reg_write(client, psc_reg, psc_value);
127
128 return err;
129}
130
131/**
132 * Set the duty cycle for DIM status
133 *
134 * @client: the i2c client
135 * @dim: either LP3944_DIM0 or LP3944_DIM1
136 * @duty_cycle: percentage of a period during which a led is ON
137 */
138static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
139 u8 duty_cycle)
140{
141 u8 pwm_reg;
142 u8 pwm_value;
143 int err;
144
145 if (dim == LP3944_DIM0)
146 pwm_reg = LP3944_REG_PWM0;
147 else if (dim == LP3944_DIM1)
148 pwm_reg = LP3944_REG_PWM1;
149 else
150 return -EINVAL;
151
152 /* Convert duty cycle to PWM value */
153 if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
154 return -EINVAL;
155
156 pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
157
158 err = lp3944_reg_write(client, pwm_reg, pwm_value);
159
160 return err;
161}
162
163/**
164 * Set the led status
165 *
166 * @led: a lp3944_led_data structure
167 * @status: one of LP3944_LED_STATUS_OFF
168 * LP3944_LED_STATUS_ON
169 * LP3944_LED_STATUS_DIM0
170 * LP3944_LED_STATUS_DIM1
171 */
172static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
173{
174 struct lp3944_data *data = i2c_get_clientdata(led->client);
175 u8 id = led->id;
176 u8 reg;
177 u8 val = 0;
178 int err;
179
180 dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
181 __func__, led->ldev.name, status);
182
183 switch (id) {
184 case LP3944_LED0:
185 case LP3944_LED1:
186 case LP3944_LED2:
187 case LP3944_LED3:
188 reg = LP3944_REG_LS0;
189 break;
190 case LP3944_LED4:
191 case LP3944_LED5:
192 case LP3944_LED6:
193 case LP3944_LED7:
194 id -= LP3944_LED4;
195 reg = LP3944_REG_LS1;
196 break;
197 default:
198 return -EINVAL;
199 }
200
201 if (status > LP3944_LED_STATUS_DIM1)
202 return -EINVAL;
203
204 /* invert only 0 and 1, leave unchanged the other values,
205 * remember we are abusing status to set blink patterns
206 */
207 if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
208 status = 1 - status;
209
210 mutex_lock(&data->lock);
211 lp3944_reg_read(led->client, reg, &val);
212
213 val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
214 val |= (status << (id << 1));
215
216 dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
217 __func__, led->ldev.name, reg, id, status, val);
218
219 /* set led status */
220 err = lp3944_reg_write(led->client, reg, val);
221 mutex_unlock(&data->lock);
222
223 return err;
224}
225
226static int lp3944_led_set_blink(struct led_classdev *led_cdev,
227 unsigned long *delay_on,
228 unsigned long *delay_off)
229{
230 struct lp3944_led_data *led = ldev_to_led(led_cdev);
231 u16 period;
232 u8 duty_cycle;
233 int err;
234
235 /* units are in ms */
236 if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
237 return -EINVAL;
238
239 if (*delay_on == 0 && *delay_off == 0) {
240 /* Special case: the leds subsystem requires a default user
241 * friendly blink pattern for the LED. Let's blink the led
242 * slowly (1Hz).
243 */
244 *delay_on = 500;
245 *delay_off = 500;
246 }
247
248 period = (*delay_on) + (*delay_off);
249
250 /* duty_cycle is the percentage of period during which the led is ON */
251 duty_cycle = 100 * (*delay_on) / period;
252
253 /* invert duty cycle for inverted leds, this has the same effect of
254 * swapping delay_on and delay_off
255 */
256 if (led->type == LP3944_LED_TYPE_LED_INVERTED)
257 duty_cycle = 100 - duty_cycle;
258
259 /* NOTE: using always the first DIM mode, this means that all leds
260 * will have the same blinking pattern.
261 *
262 * We could find a way later to have two leds blinking in hardware
263 * with different patterns at the same time, falling back to software
264 * control for the other ones.
265 */
266 err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
267 if (err)
268 return err;
269
270 err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
271 if (err)
272 return err;
273
274 dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
275 __func__);
276
277 led->status = LP3944_LED_STATUS_DIM0;
278 schedule_work(&led->work);
279
280 return 0;
281}
282
283static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
284 enum led_brightness brightness)
285{
286 struct lp3944_led_data *led = ldev_to_led(led_cdev);
287
288 dev_dbg(&led->client->dev, "%s: %s, %d\n",
289 __func__, led_cdev->name, brightness);
290
291 led->status = brightness;
292 schedule_work(&led->work);
293}
294
295static void lp3944_led_work(struct work_struct *work)
296{
297 struct lp3944_led_data *led;
298
299 led = container_of(work, struct lp3944_led_data, work);
300 lp3944_led_set(led, led->status);
301}
302
303static int lp3944_configure(struct i2c_client *client,
304 struct lp3944_data *data,
305 struct lp3944_platform_data *pdata)
306{
307 int i, err = 0;
308
309 for (i = 0; i < pdata->leds_size; i++) {
310 struct lp3944_led *pled = &pdata->leds[i];
311 struct lp3944_led_data *led = &data->leds[i];
312 led->client = client;
313 led->id = i;
314
315 switch (pled->type) {
316
317 case LP3944_LED_TYPE_LED:
318 case LP3944_LED_TYPE_LED_INVERTED:
319 led->type = pled->type;
320 led->status = pled->status;
321 led->ldev.name = pled->name;
322 led->ldev.max_brightness = 1;
323 led->ldev.brightness_set = lp3944_led_set_brightness;
324 led->ldev.blink_set = lp3944_led_set_blink;
325 led->ldev.flags = LED_CORE_SUSPENDRESUME;
326
327 INIT_WORK(&led->work, lp3944_led_work);
328 err = led_classdev_register(&client->dev, &led->ldev);
329 if (err < 0) {
330 dev_err(&client->dev,
331 "couldn't register LED %s\n",
332 led->ldev.name);
333 goto exit;
334 }
335
336 /* to expose the default value to userspace */
337 led->ldev.brightness = led->status;
338
339 /* Set the default led status */
340 err = lp3944_led_set(led, led->status);
341 if (err < 0) {
342 dev_err(&client->dev,
343 "%s couldn't set STATUS %d\n",
344 led->ldev.name, led->status);
345 goto exit;
346 }
347 break;
348
349 case LP3944_LED_TYPE_NONE:
350 default:
351 break;
352
353 }
354 }
355 return 0;
356
357exit:
358 if (i > 0)
359 for (i = i - 1; i >= 0; i--)
360 switch (pdata->leds[i].type) {
361
362 case LP3944_LED_TYPE_LED:
363 case LP3944_LED_TYPE_LED_INVERTED:
364 led_classdev_unregister(&data->leds[i].ldev);
365 cancel_work_sync(&data->leds[i].work);
366 break;
367
368 case LP3944_LED_TYPE_NONE:
369 default:
370 break;
371 }
372
373 return err;
374}
375
376static int __devinit lp3944_probe(struct i2c_client *client,
377 const struct i2c_device_id *id)
378{
379 struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
380 struct lp3944_data *data;
381
382 if (lp3944_pdata == NULL) {
383 dev_err(&client->dev, "no platform data\n");
384 return -EINVAL;
385 }
386
387 /* Let's see whether this adapter can support what we need. */
388 if (!i2c_check_functionality(client->adapter,
389 I2C_FUNC_SMBUS_BYTE_DATA)) {
390 dev_err(&client->dev, "insufficient functionality!\n");
391 return -ENODEV;
392 }
393
394 data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
395 if (!data)
396 return -ENOMEM;
397
398 data->client = client;
399 i2c_set_clientdata(client, data);
400
401 mutex_init(&data->lock);
402
403 dev_info(&client->dev, "lp3944 enabled\n");
404
405 lp3944_configure(client, data, lp3944_pdata);
406 return 0;
407}
408
409static int __devexit lp3944_remove(struct i2c_client *client)
410{
411 struct lp3944_platform_data *pdata = client->dev.platform_data;
412 struct lp3944_data *data = i2c_get_clientdata(client);
413 int i;
414
415 for (i = 0; i < pdata->leds_size; i++)
416 switch (data->leds[i].type) {
417 case LP3944_LED_TYPE_LED:
418 case LP3944_LED_TYPE_LED_INVERTED:
419 led_classdev_unregister(&data->leds[i].ldev);
420 cancel_work_sync(&data->leds[i].work);
421 break;
422
423 case LP3944_LED_TYPE_NONE:
424 default:
425 break;
426 }
427
428 kfree(data);
429 i2c_set_clientdata(client, NULL);
430
431 return 0;
432}
433
434/* lp3944 i2c driver struct */
435static const struct i2c_device_id lp3944_id[] = {
436 {"lp3944", 0},
437 {}
438};
439
440MODULE_DEVICE_TABLE(i2c, lp3944_id);
441
442static struct i2c_driver lp3944_driver = {
443 .driver = {
444 .name = "lp3944",
445 },
446 .probe = lp3944_probe,
447 .remove = __devexit_p(lp3944_remove),
448 .id_table = lp3944_id,
449};
450
451static int __init lp3944_module_init(void)
452{
453 return i2c_add_driver(&lp3944_driver);
454}
455
456static void __exit lp3944_module_exit(void)
457{
458 i2c_del_driver(&lp3944_driver);
459}
460
461module_init(lp3944_module_init);
462module_exit(lp3944_module_exit);
463
464MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
465MODULE_DESCRIPTION("LP3944 Fun Light Chip");
466MODULE_LICENSE("GPL");
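
The new driver above consumes its configuration through platform data: lp3944_probe() requires client->dev.platform_data, and lp3944_configure() walks pdata->leds[0..leds_size-1] using the name, type and status fields. A minimal board-file sketch of that contract follows; it assumes lp3944_platform_data exposes exactly the leds pointer and leds_size count used in the probe loop, and the header path, LED names, initial status value and I2C address are purely illustrative.

#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/leds-lp3944.h>		/* platform data definitions (assumed path) */

static struct lp3944_led board_lp3944_leds[] = {
	{
		.name   = "board:red:status",
		.type   = LP3944_LED_TYPE_LED,
		.status = 0,			/* illustrative: start switched off */
	},
	{
		.type   = LP3944_LED_TYPE_NONE,	/* output not wired on this board */
	},
};

static struct lp3944_platform_data board_lp3944_pdata = {
	.leds      = board_lp3944_leds,
	.leds_size = ARRAY_SIZE(board_lp3944_leds),
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),	/* address is illustrative */
		.platform_data = &board_lp3944_pdata,
	},
};
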
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdcab..dba8921240f2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
35 struct pca9532_led leds[16]; 35 struct pca9532_led leds[16];
36 struct mutex update_lock; 36 struct mutex update_lock;
37 struct input_dev *idev; 37 struct input_dev *idev;
38 struct work_struct work; 38 struct work_struct work;
39 u8 pwm[2]; 39 u8 pwm[2];
40 u8 psc[2]; 40 u8 psc[2];
41}; 41};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
87 if (b > 0xFF) 87 if (b > 0xFF)
88 return -EINVAL; 88 return -EINVAL;
89 data->pwm[pwm] = b; 89 data->pwm[pwm] = b;
90 data->psc[pwm] = blink; 90 data->psc[pwm] = blink;
91 return 0; 91 return 0;
92} 92}
93 93
94static int pca9532_setpwm(struct i2c_client *client, int pwm) 94static int pca9532_setpwm(struct i2c_client *client, int pwm)
95{ 95{
96 struct pca9532_data *data = i2c_get_clientdata(client); 96 struct pca9532_data *data = i2c_get_clientdata(client);
97 mutex_lock(&data->update_lock); 97 mutex_lock(&data->update_lock);
98 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), 98 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
99 data->pwm[pwm]); 99 data->pwm[pwm]);
100 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), 100 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
132 led->state = PCA9532_ON; 132 led->state = PCA9532_ON;
133 else { 133 else {
134 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ 134 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
135 err = pca9532_calcpwm(led->client, 0, 0, value); 135 err = pca9532_calcpwm(led->client, 0, 0, value);
136 if (err) 136 if (err)
137 return; /* XXX: led api doesn't allow error code? */ 137 return; /* XXX: led api doesn't allow error code? */
138 } 138 }
139 schedule_work(&led->work); 139 schedule_work(&led->work);
140} 140}
141 141
142static int pca9532_set_blink(struct led_classdev *led_cdev, 142static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
145 struct pca9532_led *led = ldev_to_led(led_cdev); 145 struct pca9532_led *led = ldev_to_led(led_cdev);
146 struct i2c_client *client = led->client; 146 struct i2c_client *client = led->client;
147 int psc; 147 int psc;
148 int err = 0; 148 int err = 0;
149 149
150 if (*delay_on == 0 && *delay_off == 0) { 150 if (*delay_on == 0 && *delay_off == 0) {
151 /* led subsystem ask us for a blink rate */ 151 /* led subsystem ask us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
157 157
158 /* Thecus specific: only use PSC/PWM 0 */ 158 /* Thecus specific: only use PSC/PWM 0 */
159 psc = (*delay_on * 152-1)/1000; 159 psc = (*delay_on * 152-1)/1000;
160 err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); 160 err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
161 if (err) 161 if (err)
162 return err; 162 return err;
163 schedule_work(&led->work); 163 schedule_work(&led->work);
164 return 0; 164 return 0;
165} 165}
166 166
167static int pca9532_event(struct input_dev *dev, unsigned int type, 167static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
178 else 178 else
179 data->pwm[1] = 0; 179 data->pwm[1] = 0;
180 180
181 schedule_work(&data->work); 181 schedule_work(&data->work);
182 182
183 return 0; 183 return 0;
184} 184}
185 185
186static void pca9532_input_work(struct work_struct *work) 186static void pca9532_input_work(struct work_struct *work)
187{ 187{
188 struct pca9532_data *data; 188 struct pca9532_data *data;
189 data = container_of(work, struct pca9532_data, work); 189 data = container_of(work, struct pca9532_data, work);
190 mutex_lock(&data->update_lock); 190 mutex_lock(&data->update_lock);
191 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), 191 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
192 data->pwm[1]); 192 data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
195 195
196static void pca9532_led_work(struct work_struct *work) 196static void pca9532_led_work(struct work_struct *work)
197{ 197{
198 struct pca9532_led *led; 198 struct pca9532_led *led;
199 led = container_of(work, struct pca9532_led, work); 199 led = container_of(work, struct pca9532_led, work);
200 if (led->state == PCA9532_PWM0) 200 if (led->state == PCA9532_PWM0)
201 pca9532_setpwm(led->client, 0); 201 pca9532_setpwm(led->client, 0);
202 pca9532_setled(led); 202 pca9532_setled(led);
203} 203}
204 204
205static int pca9532_configure(struct i2c_client *client, 205static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
232 led->ldev.brightness = LED_OFF; 232 led->ldev.brightness = LED_OFF;
233 led->ldev.brightness_set = pca9532_set_brightness; 233 led->ldev.brightness_set = pca9532_set_brightness;
234 led->ldev.blink_set = pca9532_set_blink; 234 led->ldev.blink_set = pca9532_set_blink;
235 INIT_WORK(&led->work, pca9532_led_work); 235 INIT_WORK(&led->work, pca9532_led_work);
236 err = led_classdev_register(&client->dev, &led->ldev); 236 err = led_classdev_register(&client->dev, &led->ldev);
237 if (err < 0) { 237 if (err < 0) {
238 dev_err(&client->dev, 238 dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
262 BIT_MASK(SND_TONE); 262 BIT_MASK(SND_TONE);
263 data->idev->event = pca9532_event; 263 data->idev->event = pca9532_event;
264 input_set_drvdata(data->idev, data); 264 input_set_drvdata(data->idev, data);
265 INIT_WORK(&data->work, pca9532_input_work); 265 INIT_WORK(&data->work, pca9532_input_work);
266 err = input_register_device(data->idev); 266 err = input_register_device(data->idev);
267 if (err) { 267 if (err) {
268 input_free_device(data->idev); 268 input_free_device(data->idev);
269 cancel_work_sync(&data->work); 269 cancel_work_sync(&data->work);
270 data->idev = NULL; 270 data->idev = NULL;
271 goto exit; 271 goto exit;
272 } 272 }
@@ -283,13 +283,13 @@ exit:
283 break; 283 break;
284 case PCA9532_TYPE_LED: 284 case PCA9532_TYPE_LED:
285 led_classdev_unregister(&data->leds[i].ldev); 285 led_classdev_unregister(&data->leds[i].ldev);
286 cancel_work_sync(&data->leds[i].work); 286 cancel_work_sync(&data->leds[i].work);
287 break; 287 break;
288 case PCA9532_TYPE_N2100_BEEP: 288 case PCA9532_TYPE_N2100_BEEP:
289 if (data->idev != NULL) { 289 if (data->idev != NULL) {
290 input_unregister_device(data->idev); 290 input_unregister_device(data->idev);
291 input_free_device(data->idev); 291 input_free_device(data->idev);
292 cancel_work_sync(&data->work); 292 cancel_work_sync(&data->work);
293 data->idev = NULL; 293 data->idev = NULL;
294 } 294 }
295 break; 295 break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
340 break; 340 break;
341 case PCA9532_TYPE_LED: 341 case PCA9532_TYPE_LED:
342 led_classdev_unregister(&data->leds[i].ldev); 342 led_classdev_unregister(&data->leds[i].ldev);
343 cancel_work_sync(&data->leds[i].work); 343 cancel_work_sync(&data->leds[i].work);
344 break; 344 break;
345 case PCA9532_TYPE_N2100_BEEP: 345 case PCA9532_TYPE_N2100_BEEP:
346 if (data->idev != NULL) { 346 if (data->idev != NULL) {
347 input_unregister_device(data->idev); 347 input_unregister_device(data->idev);
348 input_free_device(data->idev); 348 input_free_device(data->idev);
349 cancel_work_sync(&data->work); 349 cancel_work_sync(&data->work);
350 data->idev = NULL; 350 data->idev = NULL;
351 } 351 }
352 break; 352 break;
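
Both the brightness and blink handlers above funnel into pca9532_calcpwm()/pca9532_setpwm(), and the Thecus-specific blink path derives the 8-bit PSC prescaler from the requested on-time with psc = (*delay_on * 152 - 1) / 1000. Below is a small sketch of that arithmetic and its inverse, assuming the 152 Hz blink clock already implied by the constant in the hunk; the helper names and the clamp to 0xff are illustrative, not part of the driver.

#include <linux/types.h>

static inline u8 example_msecs_to_psc(unsigned long msecs)
{
	unsigned long psc = (msecs * 152 - 1) / 1000;	/* period ~= (PSC + 1) / 152 s */

	return psc > 0xff ? 0xff : psc;			/* PSC is an 8-bit register */
}

static inline unsigned long example_psc_to_msecs(u8 psc)
{
	return ((unsigned long)psc + 1) * 1000 / 152;
}

A requested delay_on of 1000 ms maps to PSC = 151, which reads back as roughly 1000 ms.
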
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735cb..9c3138265f8e 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -82,7 +82,7 @@ struct lg_cpu {
82 82
83struct lg_eventfd { 83struct lg_eventfd {
84 unsigned long addr; 84 unsigned long addr;
85 struct file *event; 85 struct eventfd_ctx *event;
86}; 86};
87 87
88struct lg_eventfd_map { 88struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e297121058..9f9a2953b383 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
50 50
51 /* Now append new entry. */ 51 /* Now append new entry. */
52 new->map[new->num].addr = addr; 52 new->map[new->num].addr = addr;
53 new->map[new->num].event = eventfd_fget(fd); 53 new->map[new->num].event = eventfd_ctx_fdget(fd);
54 if (IS_ERR(new->map[new->num].event)) { 54 if (IS_ERR(new->map[new->num].event)) {
55 kfree(new); 55 kfree(new);
56 return PTR_ERR(new->map[new->num].event); 56 return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
357 357
358 /* Release any eventfds they registered. */ 358 /* Release any eventfds they registered. */
359 for (i = 0; i < lg->eventfds->num; i++) 359 for (i = 0; i < lg->eventfds->num; i++)
360 fput(lg->eventfds->map[i].event); 360 eventfd_ctx_put(lg->eventfds->map[i].event);
361 kfree(lg->eventfds); 361 kfree(lg->eventfds);
362 362
363 /* If lg->dead doesn't contain an error code it will be NULL or a 363 /* If lg->dead doesn't contain an error code it will be NULL or a
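
The two lguest hunks switch the stored handle from a struct file to a struct eventfd_ctx, so the reference is taken and released on the eventfd context itself rather than on the file. A minimal sketch of that lifetime; the function name and fd handling are illustrative.

#include <linux/eventfd.h>
#include <linux/err.h>

static int example_attach_eventfd(int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);	/* reference on the context, not the file */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eventfd_signal(ctx, 1);		/* wake whoever is polling the eventfd */

	eventfd_ctx_put(ctx);		/* drop the reference when done */
	return 0;
}
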
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b12..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
195 struct dm_exception_store **store) 195 struct dm_exception_store **store)
196{ 196{
197 int r = 0; 197 int r = 0;
198 struct dm_exception_store_type *type; 198 struct dm_exception_store_type *type = NULL;
199 struct dm_exception_store *tmp_store; 199 struct dm_exception_store *tmp_store;
200 char persistent; 200 char persistent;
201 201
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
211 } 211 }
212 212
213 persistent = toupper(*argv[1]); 213 persistent = toupper(*argv[1]);
214 if (persistent != 'P' && persistent != 'N') { 214 if (persistent == 'P')
215 type = get_type("P");
216 else if (persistent == 'N')
217 type = get_type("N");
218 else {
215 ti->error = "Persistent flag is not P or N"; 219 ti->error = "Persistent flag is not P or N";
216 return -EINVAL; 220 return -EINVAL;
217 } 221 }
218 222
219 type = get_type(&persistent);
220 if (!type) { 223 if (!type) {
221 ti->error = "Exception store type not recognised"; 224 ti->error = "Exception store type not recognised";
222 r = -EINVAL; 225 r = -EINVAL;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c8..2cba557d9e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
495 return 0; 495 return 0;
496 } 496 }
497 497
498 if (blk_stack_limits(limits, &q->limits, start) < 0) 498 if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
499 DMWARN("%s: target device %s is misaligned", 499 DMWARN("%s: target device %s is misaligned",
500 dm_device_name(ti->table->md), bdevname(bdev, b)); 500 dm_device_name(ti->table->md), bdevname(bdev, b));
501 501
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921d..9acd54a5cffb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
1017 clone->bi_flags |= 1 << BIO_CLONED; 1017 clone->bi_flags |= 1 << BIO_CLONED;
1018 1018
1019 if (bio_integrity(bio)) { 1019 if (bio_integrity(bio)) {
1020 bio_integrity_clone(clone, bio, GFP_NOIO); 1020 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1021 bio_integrity_trim(clone, 1021 bio_integrity_trim(clone,
1022 bio_sector_offset(bio, idx, offset), len); 1022 bio_sector_offset(bio, idx, offset), len);
1023 } 1023 }
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
1045 clone->bi_flags &= ~(1 << BIO_SEG_VALID); 1045 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1046 1046
1047 if (bio_integrity(bio)) { 1047 if (bio_integrity(bio)) {
1048 bio_integrity_clone(clone, bio, GFP_NOIO); 1048 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1049 1049
1050 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1050 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1051 bio_integrity_trim(clone, 1051 bio_integrity_trim(clone,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9b..5810fa906af0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
166 rdev->sectors = sectors * mddev->chunk_sectors; 166 rdev->sectors = sectors * mddev->chunk_sectors;
167 } 167 }
168 168
169 blk_queue_stack_limits(mddev->queue, 169 disk_stack_limits(mddev->gendisk, rdev->bdev,
170 rdev->bdev->bd_disk->queue); 170 rdev->data_offset << 9);
171 /* as we don't honour merge_bvec_fn, we must never risk 171 /* as we don't honour merge_bvec_fn, we must never risk
172 * violating it, so limit ->max_sector to one PAGE, as 172 * violating it, so limit ->max_sector to one PAGE, as
173 * a one page request is never in violation. 173 * a one page request is never in violation.
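
The same substitution recurs in the multipath, raid0, raid1, raid10 and raid5 hunks below: instead of stacking raw queue limits with blk_queue_stack_limits(), each member's limits are stacked onto the array's gendisk with the member's data offset taken into account, so alignment is computed relative to where the array data really starts on each device. Reduced to its core, this is the form the raid5 hunk uses (rdev->data_offset is in sectors; the << 9 matches the byte units used in these hunks):

static void example_stack_member_limits(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
}
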
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52cb..0f4a70c43ffc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3573,7 +3573,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3573 char *e; 3573 char *e;
3574 unsigned long long new = simple_strtoull(buf, &e, 10); 3574 unsigned long long new = simple_strtoull(buf, &e, 10);
3575 3575
3576 if (mddev->pers->quiesce == NULL) 3576 if (mddev->pers == NULL ||
3577 mddev->pers->quiesce == NULL)
3577 return -EINVAL; 3578 return -EINVAL;
3578 if (buf == e || (*e && *e != '\n')) 3579 if (buf == e || (*e && *e != '\n'))
3579 return -EINVAL; 3580 return -EINVAL;
@@ -3601,7 +3602,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3601 char *e; 3602 char *e;
3602 unsigned long long new = simple_strtoull(buf, &e, 10); 3603 unsigned long long new = simple_strtoull(buf, &e, 10);
3603 3604
3604 if (mddev->pers->quiesce == NULL) 3605 if (mddev->pers == NULL ||
3606 mddev->pers->quiesce == NULL)
3605 return -EINVAL; 3607 return -EINVAL;
3606 if (buf == e || (*e && *e != '\n')) 3608 if (buf == e || (*e && *e != '\n'))
3607 return -EINVAL; 3609 return -EINVAL;
@@ -3844,11 +3846,9 @@ static int md_alloc(dev_t dev, char *name)
3844 flush_scheduled_work(); 3846 flush_scheduled_work();
3845 3847
3846 mutex_lock(&disks_mutex); 3848 mutex_lock(&disks_mutex);
3847 if (mddev->gendisk) { 3849 error = -EEXIST;
3848 mutex_unlock(&disks_mutex); 3850 if (mddev->gendisk)
3849 mddev_put(mddev); 3851 goto abort;
3850 return -EEXIST;
3851 }
3852 3852
3853 if (name) { 3853 if (name) {
3854 /* Need to ensure that 'name' is not a duplicate. 3854 /* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3860,15 @@ static int md_alloc(dev_t dev, char *name)
3860 if (mddev2->gendisk && 3860 if (mddev2->gendisk &&
3861 strcmp(mddev2->gendisk->disk_name, name) == 0) { 3861 strcmp(mddev2->gendisk->disk_name, name) == 0) {
3862 spin_unlock(&all_mddevs_lock); 3862 spin_unlock(&all_mddevs_lock);
3863 return -EEXIST; 3863 goto abort;
3864 } 3864 }
3865 spin_unlock(&all_mddevs_lock); 3865 spin_unlock(&all_mddevs_lock);
3866 } 3866 }
3867 3867
3868 error = -ENOMEM;
3868 mddev->queue = blk_alloc_queue(GFP_KERNEL); 3869 mddev->queue = blk_alloc_queue(GFP_KERNEL);
3869 if (!mddev->queue) { 3870 if (!mddev->queue)
3870 mutex_unlock(&disks_mutex); 3871 goto abort;
3871 mddev_put(mddev);
3872 return -ENOMEM;
3873 }
3874 mddev->queue->queuedata = mddev; 3872 mddev->queue->queuedata = mddev;
3875 3873
3876 /* Can be unlocked because the queue is new: no concurrency */ 3874 /* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3878,9 @@ static int md_alloc(dev_t dev, char *name)
3880 3878
3881 disk = alloc_disk(1 << shift); 3879 disk = alloc_disk(1 << shift);
3882 if (!disk) { 3880 if (!disk) {
3883 mutex_unlock(&disks_mutex);
3884 blk_cleanup_queue(mddev->queue); 3881 blk_cleanup_queue(mddev->queue);
3885 mddev->queue = NULL; 3882 mddev->queue = NULL;
3886 mddev_put(mddev); 3883 goto abort;
3887 return -ENOMEM;
3888 } 3884 }
3889 disk->major = MAJOR(mddev->unit); 3885 disk->major = MAJOR(mddev->unit);
3890 disk->first_minor = unit << shift; 3886 disk->first_minor = unit << shift;
@@ -3906,16 +3902,22 @@ static int md_alloc(dev_t dev, char *name)
3906 mddev->gendisk = disk; 3902 mddev->gendisk = disk;
3907 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3903 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3908 &disk_to_dev(disk)->kobj, "%s", "md"); 3904 &disk_to_dev(disk)->kobj, "%s", "md");
3909 mutex_unlock(&disks_mutex); 3905 if (error) {
3910 if (error) 3906 /* This isn't possible, but as kobject_init_and_add is marked
3907 * __must_check, we must do something with the result
3908 */
3911 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3909 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3912 disk->disk_name); 3910 disk->disk_name);
3913 else { 3911 error = 0;
3912 }
3913 abort:
3914 mutex_unlock(&disks_mutex);
3915 if (!error) {
3914 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3916 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3915 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); 3917 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3916 } 3918 }
3917 mddev_put(mddev); 3919 mddev_put(mddev);
3918 return 0; 3920 return error;
3919} 3921}
3920 3922
3921static struct kobject *md_probe(dev_t dev, int *part, void *data) 3923static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6336,16 @@ void md_do_sync(mddev_t *mddev)
6334 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6336 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6335 } 6337 }
6336 6338
6337 if (j >= mddev->resync_max) 6339 while (j >= mddev->resync_max && !kthread_should_stop()) {
6338 wait_event(mddev->recovery_wait, 6340 /* As this condition is controlled by user-space,
6339 mddev->resync_max > j 6341 * we can block indefinitely, so use '_interruptible'
6340 || kthread_should_stop()); 6342 * to avoid triggering warnings.
6343 */
6344 flush_signals(current); /* just in case */
6345 wait_event_interruptible(mddev->recovery_wait,
6346 mddev->resync_max > j
6347 || kthread_should_stop());
6348 }
6341 6349
6342 if (kthread_should_stop()) 6350 if (kthread_should_stop())
6343 goto interrupted; 6351 goto interrupted;
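
resync_max is writable from sysfs, so md_do_sync() can legitimately sleep here for an unbounded time; the hunk therefore switches to an interruptible wait (with signals flushed first) so the blocked kthread does not trip the hung-task warnings or inflate load average the way an uninterruptible sleep would. The generic shape of that pattern, with every name other than the wait/stop primitives being illustrative:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static void example_wait_below_limit(wait_queue_head_t *wq,
				     unsigned long long *limit,
				     unsigned long long pos)
{
	while (pos >= *limit && !kthread_should_stop()) {
		flush_signals(current);		/* just in case, as the hunk notes */
		wait_event_interruptible(*wq,
					 *limit > pos ||
					 kthread_should_stop());
	}
}
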
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa6598..237fe3fd235c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
294 for (path = first; path <= last; path++) 294 for (path = first; path <= last; path++)
295 if ((p=conf->multipaths+path)->rdev == NULL) { 295 if ((p=conf->multipaths+path)->rdev == NULL) {
296 q = rdev->bdev->bd_disk->queue; 296 q = rdev->bdev->bd_disk->queue;
297 blk_queue_stack_limits(mddev->queue, q); 297 disk_stack_limits(mddev->gendisk, rdev->bdev,
298 rdev->data_offset << 9);
298 299
299 /* as we don't honour merge_bvec_fn, we must never risk 300 /* as we don't honour merge_bvec_fn, we must never risk
300 * violating it, so limit ->max_sector to one PAGE, as 301 * violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
463 464
464 disk = conf->multipaths + disk_idx; 465 disk = conf->multipaths + disk_idx;
465 disk->rdev = rdev; 466 disk->rdev = rdev;
467 disk_stack_limits(mddev->gendisk, rdev->bdev,
468 rdev->data_offset << 9);
466 469
467 blk_queue_stack_limits(mddev->queue,
468 rdev->bdev->bd_disk->queue);
469 /* as we don't honour merge_bvec_fn, we must never risk 470 /* as we don't honour merge_bvec_fn, we must never risk
470 * violating it, not that we ever expect a device with 471 * violating it, not that we ever expect a device with
471 * a merge_bvec_fn to be involved in multipath */ 472 * a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d8695..335f490dcad6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
170 } 170 }
171 dev[j] = rdev1; 171 dev[j] = rdev1;
172 172
173 blk_queue_stack_limits(mddev->queue, 173 disk_stack_limits(mddev->gendisk, rdev1->bdev,
174 rdev1->bdev->bd_disk->queue); 174 rdev1->data_offset << 9);
175 /* as we don't honour merge_bvec_fn, we must never risk 175 /* as we don't honour merge_bvec_fn, we must never risk
176 * violating it, so limit ->max_sector to one PAGE, as 176 * violating it, so limit ->max_sector to one PAGE, as
177 * a one page request is never in violation. 177 * a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
250 mddev->chunk_sectors << 9); 250 mddev->chunk_sectors << 9);
251 goto abort; 251 goto abort;
252 } 252 }
253
254 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
255 blk_queue_io_opt(mddev->queue,
256 (mddev->chunk_sectors << 9) * mddev->raid_disks);
257
253 printk(KERN_INFO "raid0: done.\n"); 258 printk(KERN_INFO "raid0: done.\n");
254 mddev->private = conf; 259 mddev->private = conf;
255 return 0; 260 return 0;
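
Beyond the limit stacking, raid0 (and, below, raid10 and raid5) now exports I/O sizing hints: the chunk size as the minimum I/O and a full stripe as the optimal I/O, so filesystems and partitioning tools can align to the array geometry. A worked example with illustrative numbers, 128-sector (64 KiB) chunks across 4 member disks:

static void example_export_raid0_hints(mddev_t *mddev)
{
	/* illustrative geometry: 128-sector (64 KiB) chunks, 4 member disks */
	unsigned int chunk_size = 128 << 9;		/* 65536 bytes */

	blk_queue_io_min(mddev->queue, chunk_size);	/* one chunk:   64 KiB  */
	blk_queue_io_opt(mddev->queue, chunk_size * 4);	/* full stripe: 256 KiB */
}
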
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef57..0569efba0c02 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1123 for (mirror = first; mirror <= last; mirror++) 1123 for (mirror = first; mirror <= last; mirror++)
1124 if ( !(p=conf->mirrors+mirror)->rdev) { 1124 if ( !(p=conf->mirrors+mirror)->rdev) {
1125 1125
1126 blk_queue_stack_limits(mddev->queue, 1126 disk_stack_limits(mddev->gendisk, rdev->bdev,
1127 rdev->bdev->bd_disk->queue); 1127 rdev->data_offset << 9);
1128 /* as we don't honour merge_bvec_fn, we must never risk 1128 /* as we don't honour merge_bvec_fn, we must never risk
1129 * violating it, so limit ->max_sector to one PAGE, as 1129 * violating it, so limit ->max_sector to one PAGE, as
1130 * a one page request is never in violation. 1130 * a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
1988 disk = conf->mirrors + disk_idx; 1988 disk = conf->mirrors + disk_idx;
1989 1989
1990 disk->rdev = rdev; 1990 disk->rdev = rdev;
1991 1991 disk_stack_limits(mddev->gendisk, rdev->bdev,
1992 blk_queue_stack_limits(mddev->queue, 1992 rdev->data_offset << 9);
1993 rdev->bdev->bd_disk->queue);
1994 /* as we don't honour merge_bvec_fn, we must never risk 1993 /* as we don't honour merge_bvec_fn, we must never risk
1995 * violating it, so limit ->max_sector to one PAGE, as 1994 * violating it, so limit ->max_sector to one PAGE, as
1996 * a one page request is never in violation. 1995 * a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1151 for ( ; mirror <= last ; mirror++) 1151 for ( ; mirror <= last ; mirror++)
1152 if ( !(p=conf->mirrors+mirror)->rdev) { 1152 if ( !(p=conf->mirrors+mirror)->rdev) {
1153 1153
1154 blk_queue_stack_limits(mddev->queue, 1154 disk_stack_limits(mddev->gendisk, rdev->bdev,
1155 rdev->bdev->bd_disk->queue); 1155 rdev->data_offset << 9);
1156 /* as we don't honour merge_bvec_fn, we must never risk 1156 /* as we don't honour merge_bvec_fn, we must never risk
1157 * violating it, so limit ->max_sector to one PAGE, as 1157 * violating it, so limit ->max_sector to one PAGE, as
1158 * a one page request is never in violation. 1158 * a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2044static int run(mddev_t *mddev) 2044static int run(mddev_t *mddev)
2045{ 2045{
2046 conf_t *conf; 2046 conf_t *conf;
2047 int i, disk_idx; 2047 int i, disk_idx, chunk_size;
2048 mirror_info_t *disk; 2048 mirror_info_t *disk;
2049 mdk_rdev_t *rdev; 2049 mdk_rdev_t *rdev;
2050 int nc, fc, fo; 2050 int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
2130 spin_lock_init(&conf->device_lock); 2130 spin_lock_init(&conf->device_lock);
2131 mddev->queue->queue_lock = &conf->device_lock; 2131 mddev->queue->queue_lock = &conf->device_lock;
2132 2132
2133 chunk_size = mddev->chunk_sectors << 9;
2134 blk_queue_io_min(mddev->queue, chunk_size);
2135 if (conf->raid_disks % conf->near_copies)
2136 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2137 else
2138 blk_queue_io_opt(mddev->queue, chunk_size *
2139 (conf->raid_disks / conf->near_copies));
2140
2133 list_for_each_entry(rdev, &mddev->disks, same_set) { 2141 list_for_each_entry(rdev, &mddev->disks, same_set) {
2134 disk_idx = rdev->raid_disk; 2142 disk_idx = rdev->raid_disk;
2135 if (disk_idx >= mddev->raid_disks 2143 if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
2138 disk = conf->mirrors + disk_idx; 2146 disk = conf->mirrors + disk_idx;
2139 2147
2140 disk->rdev = rdev; 2148 disk->rdev = rdev;
2141 2149 disk_stack_limits(mddev->gendisk, rdev->bdev,
2142 blk_queue_stack_limits(mddev->queue, 2150 rdev->data_offset << 9);
2143 rdev->bdev->bd_disk->queue);
2144 /* as we don't honour merge_bvec_fn, we must never risk 2151 /* as we don't honour merge_bvec_fn, we must never risk
2145 * violating it, so limit ->max_sector to one PAGE, as 2152 * violating it, so limit ->max_sector to one PAGE, as
2146 * a one page request is never in violation. 2153 * a one page request is never in violation.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e138..37835538b58e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
3699 goto retry; 3699 goto retry;
3700 } 3700 }
3701 } 3701 }
3702 /* FIXME what if we get a false positive because these 3702
3703 * are being updated. 3703 if (bio_data_dir(bi) == WRITE &&
3704 */ 3704 logical_sector >= mddev->suspend_lo &&
3705 if (logical_sector >= mddev->suspend_lo &&
3706 logical_sector < mddev->suspend_hi) { 3705 logical_sector < mddev->suspend_hi) {
3707 release_stripe(sh); 3706 release_stripe(sh);
3708 schedule(); 3707 /* As the suspend_* range is controlled by
3708 * userspace, we want an interruptible
3709 * wait.
3710 */
3711 flush_signals(current);
3712 prepare_to_wait(&conf->wait_for_overlap,
3713 &w, TASK_INTERRUPTIBLE);
3714 if (logical_sector >= mddev->suspend_lo &&
3715 logical_sector < mddev->suspend_hi)
3716 schedule();
3709 goto retry; 3717 goto retry;
3710 } 3718 }
3711 3719
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4452static int run(mddev_t *mddev) 4460static int run(mddev_t *mddev)
4453{ 4461{
4454 raid5_conf_t *conf; 4462 raid5_conf_t *conf;
4455 int working_disks = 0; 4463 int working_disks = 0, chunk_size;
4456 mdk_rdev_t *rdev; 4464 mdk_rdev_t *rdev;
4457 4465
4458 if (mddev->recovery_cp != MaxSector) 4466 if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
4607 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4615 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4608 4616
4609 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4617 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4618 chunk_size = mddev->chunk_sectors << 9;
4619 blk_queue_io_min(mddev->queue, chunk_size);
4620 blk_queue_io_opt(mddev->queue, chunk_size *
4621 (conf->raid_disks - conf->max_degraded));
4622
4623 list_for_each_entry(rdev, &mddev->disks, same_set)
4624 disk_stack_limits(mddev->gendisk, rdev->bdev,
4625 rdev->data_offset << 9);
4610 4626
4611 return 0; 4627 return 0;
4612abort: 4628abort:
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae9..a461017ce5ce 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
1313 struct mmc_spi_host *host; 1313 struct mmc_spi_host *host;
1314 int status; 1314 int status;
1315 1315
1316 /* We rely on full duplex transfers, mostly to reduce
1317 * per-transfer overheads (by making fewer transfers).
1318 */
1319 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1320 return -EINVAL;
1321
1316 /* MMC and SD specs only seem to care that sampling is on the 1322 /* MMC and SD specs only seem to care that sampling is on the
1317 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode 1323 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
1318 * should be legit. We'll use mode 0 since the steady state is 0, 1324 * should be legit. We'll use mode 0 since the steady state is 0,
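
The probe now refuses to bind to controllers flagged as half-duplex only, since (as the new comment says) the driver relies on full-duplex transfers to keep per-transfer overhead down. The flag it tests is advertised by the controller driver when it registers its SPI master; a sketch with invented controller and driver names:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_ctlr { int dummy; };		/* stand-in for real controller state */

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct example_ctlr));
	if (!master)
		return -ENOMEM;

	/* advertise the limitation; mmc_spi_probe() above will then refuse to bind */
	master->flags = SPI_MASTER_HALF_DUPLEX;

	return spi_register_master(master);
}
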
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f918..1479da6d3aa6 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
194 parts[this_part].name = extra_mem; 194 parts[this_part].name = extra_mem;
195 extra_mem += name_len + 1; 195 extra_mem += name_len + 1;
196 196
197 dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n", 197 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
198 this_part, 198 this_part,
199 parts[this_part].name, 199 parts[this_part].name,
200 parts[this_part].offset, 200 parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 59c46126a5ce..ae5fe91867e1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
54#define SR_SRWD 0x80 /* SR write protect */ 54#define SR_SRWD 0x80 /* SR write protect */
55 55
56/* Define max times to check status register before we give up. */ 56/* Define max times to check status register before we give up. */
57#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */ 57#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
58#define CMD_SIZE 4 58#define CMD_SIZE 4
59 59
60#ifdef CONFIG_M25PXX_USE_FAST_READ 60#ifdef CONFIG_M25PXX_USE_FAST_READ
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8c..d8cf29c01cc4 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
226 if (!desperate && inftl->numfreeEUNs < 2) { 226 if (!desperate && inftl->numfreeEUNs < 2) {
227 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " 227 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
228 "EUNs (%d)\n", inftl->numfreeEUNs); 228 "EUNs (%d)\n", inftl->numfreeEUNs);
229 return 0xffff; 229 return BLOCK_NIL;
230 } 230 }
231 231
232 /* Scan for a free block */ 232 /* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
281 silly = MAX_LOOPS; 281 silly = MAX_LOOPS;
282 while (thisEUN < inftl->nb_blocks) { 282 while (thisEUN < inftl->nb_blocks) {
283 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { 283 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
284 if ((BlockMap[block] != 0xffff) || BlockDeleted[block]) 284 if ((BlockMap[block] != BLOCK_NIL) ||
285 BlockDeleted[block])
285 continue; 286 continue;
286 287
287 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) 288 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
525 if (!silly--) { 526 if (!silly--) {
526 printk(KERN_WARNING "INFTL: infinite loop in " 527 printk(KERN_WARNING "INFTL: infinite loop in "
527 "Virtual Unit Chain 0x%x\n", thisVUC); 528 "Virtual Unit Chain 0x%x\n", thisVUC);
528 return 0xffff; 529 return BLOCK_NIL;
529 } 530 }
530 531
531 /* Skip to next block in chain */ 532 /* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
549 * waiting to be picked up. We're going to have to fold 550 * waiting to be picked up. We're going to have to fold
550 * a chain to make room. 551 * a chain to make room.
551 */ 552 */
552 thisEUN = INFTL_makefreeblock(inftl, 0xffff); 553 thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
553 554
554 /* 555 /*
555 * Hopefully we free something, lets try again. 556 * Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
631 632
632 printk(KERN_WARNING "INFTL: error folding to make room for Virtual " 633 printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
633 "Unit Chain 0x%x\n", thisVUC); 634 "Unit Chain 0x%x\n", thisVUC);
634 return 0xffff; 635 return BLOCK_NIL;
635} 636}
636 637
637/* 638/*
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index b08a798ee254..2aac41bde8b3 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -42,10 +42,8 @@
42#include <mach/hardware.h> 42#include <mach/hardware.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
45#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
46
47struct armflash_subdev_info { 45struct armflash_subdev_info {
48 char name[SUBDEV_NAME_SIZE]; 46 char *name;
49 struct mtd_info *mtd; 47 struct mtd_info *mtd;
50 struct map_info map; 48 struct map_info map;
51 struct flash_platform_data *plat; 49 struct flash_platform_data *plat;
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
134 map_destroy(subdev->mtd); 132 map_destroy(subdev->mtd);
135 if (subdev->map.virt) 133 if (subdev->map.virt)
136 iounmap(subdev->map.virt); 134 iounmap(subdev->map.virt);
135 kfree(subdev->name);
136 subdev->name = NULL;
137 release_mem_region(subdev->map.phys, subdev->map.size); 137 release_mem_region(subdev->map.phys, subdev->map.size);
138} 138}
139 139
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev)
177 177
178 if (nr == 1) 178 if (nr == 1)
179 /* No MTD concatenation, just use the default name */ 179 /* No MTD concatenation, just use the default name */
180 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s", 180 subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
181 dev_name(&dev->dev));
182 else 181 else
183 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d", 182 subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
184 dev_name(&dev->dev), i); 183 dev_name(&dev->dev), i);
184 if (!subdev->name) {
185 err = -ENOMEM;
186 break;
187 }
185 subdev->plat = plat; 188 subdev->plat = plat;
186 189
187 err = armflash_subdev_probe(subdev, res); 190 err = armflash_subdev_probe(subdev, res);
188 if (err) 191 if (err) {
192 kfree(subdev->name);
193 subdev->name = NULL;
189 break; 194 break;
195 }
190 } 196 }
191 info->nr_subdev = i; 197 info->nr_subdev = i;
192 198
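
With BUS_ID_SIZE gone, the subdevice name becomes a heap string: kstrdup()/kasprintf() size the buffer themselves, and both the probe error path and armflash_subdev_remove() must free it. The essentials of that pattern, using a stand-in structure:

#include <linux/slab.h>
#include <linux/device.h>

struct example_subdev {
	char *name;
};

static int example_subdev_set_name(struct example_subdev *sub,
				   struct device *dev, int index, int count)
{
	if (count == 1)
		sub->name = kstrdup(dev_name(dev), GFP_KERNEL);
	else
		sub->name = kasprintf(GFP_KERNEL, "%s-%d", dev_name(dev), index);

	return sub->name ? 0 : -ENOMEM;
}

static void example_subdev_put_name(struct example_subdev *sub)
{
	kfree(sub->name);	/* kfree(NULL) is a no-op */
	sub->name = NULL;
}
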
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2802992b39da..20c828ba9405 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
534 &num_partitions); 534 &num_partitions);
535 535
536 if ((!partitions) || (num_partitions == 0)) { 536 if ((!partitions) || (num_partitions == 0)) {
537 printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n"); 537 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
538 res = ENXIO; 538 res = ENXIO;
539 goto err_no_partitions; 539 goto err_no_partitions;
540 } 540 }
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0cd76f89f4b0..ebd07e95b814 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,8 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/jiffies.h>
15#include <linux/sched.h>
14#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
15#include <linux/mtd/nand.h> 17#include <linux/mtd/nand.h>
16#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
541 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 543 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
542 mtd); 544 mtd);
543 unsigned long timeo = jiffies; 545 unsigned long timeo = jiffies;
544 int status, state = this->state; 546 int status = NAND_STATUS_FAIL, state = this->state;
545 547
546 if (state == FL_ERASING) 548 if (state == FL_ERASING)
547 timeo += (HZ * 400) / 1000; 549 timeo += (HZ * 400) / 1000;
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
556 558
557 while (time_before(jiffies, timeo)) { 559 while (time_before(jiffies, timeo)) {
558 status = __raw_readb(this->IO_ADDR_R); 560 status = __raw_readb(this->IO_ADDR_R);
559 if (!(status & 0x40)) 561 if (status & NAND_STATUS_READY)
560 break; 562 break;
563 cond_resched();
561 } 564 }
562 return status; 565 return status;
563} 566}
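
omap_wait() gains three fixes in one hunk: status starts out as NAND_STATUS_FAIL so a timeout is reported as a failure rather than returning an uninitialized value, the ready bit is tested with the named NAND_STATUS_READY mask and the correct polarity, and cond_resched() keeps the busy-wait from monopolizing the CPU. The resulting loop shape as a stand-alone sketch; the register pointer and timeout are illustrative:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/nand.h>

static int example_wait_ready(void __iomem *status_reg, unsigned int timeout_ms)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(timeout_ms);
	int status = NAND_STATUS_FAIL;		/* reported if we time out */

	while (time_before(jiffies, timeo)) {
		status = __raw_readb(status_reg);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();			/* be nice while polling */
	}
	return status;
}
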
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c2..fb86cacd5bdb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
208 /* Normally, we force a fold to happen before we run out of free blocks completely */ 208 /* Normally, we force a fold to happen before we run out of free blocks completely */
209 if (!desperate && nftl->numfreeEUNs < 2) { 209 if (!desperate && nftl->numfreeEUNs < 2) {
210 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); 210 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
211 return 0xffff; 211 return BLOCK_NIL;
212 } 212 }
213 213
214 /* Scan for a free block */ 214 /* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
230 printk("Argh! No free blocks found! LastFreeEUN = %d, " 230 printk("Argh! No free blocks found! LastFreeEUN = %d, "
231 "FirstEUN = %d\n", nftl->LastFreeEUN, 231 "FirstEUN = %d\n", nftl->LastFreeEUN,
232 le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN)); 232 le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
233 return 0xffff; 233 return BLOCK_NIL;
234 } 234 }
235 } while (pot != nftl->LastFreeEUN); 235 } while (pot != nftl->LastFreeEUN);
236 236
237 return 0xffff; 237 return BLOCK_NIL;
238} 238}
239 239
240static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) 240static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
431 431
432 /* add the header so that it is now a valid chain */ 432 /* add the header so that it is now a valid chain */
433 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); 433 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
434 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff; 434 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
435 435
436 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, 436 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
437 8, &retlen, (char *)&oob.u); 437 8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
515 if (ChainLength < 2) { 515 if (ChainLength < 2) {
516 printk(KERN_WARNING "No Virtual Unit Chains available for folding. " 516 printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
517 "Failing request\n"); 517 "Failing request\n");
518 return 0xffff; 518 return BLOCK_NIL;
519 } 519 }
520 520
521 return NFTL_foldchain (nftl, LongestChain, pendingblock); 521 return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
578 printk(KERN_WARNING 578 printk(KERN_WARNING
579 "Infinite loop in Virtual Unit Chain 0x%x\n", 579 "Infinite loop in Virtual Unit Chain 0x%x\n",
580 thisVUC); 580 thisVUC);
581 return 0xffff; 581 return BLOCK_NIL;
582 } 582 }
583 583
584 /* Skip to next block in chain */ 584 /* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
601 //u16 startEUN = nftl->EUNtable[thisVUC]; 601 //u16 startEUN = nftl->EUNtable[thisVUC];
602 602
603 //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); 603 //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
604 writeEUN = NFTL_makefreeblock(nftl, 0xffff); 604 writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
605 605
606 if (writeEUN == BLOCK_NIL) { 606 if (writeEUN == BLOCK_NIL) {
607 /* OK, we accept that the above comment is 607 /* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
673 673
674 printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", 674 printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
675 thisVUC); 675 thisVUC);
676 return 0xffff; 676 return BLOCK_NIL;
677} 677}
678 678
679static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, 679static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
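
The INFTL and NFTL hunks are the same cleanup: the bare 0xffff sentinel is spelled BLOCK_NIL, the name the rest of both drivers already compares against (the value is unchanged), so chain-termination checks read consistently. An illustrative caller only; the error code is not from the drivers:

writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);	/* BLOCK_NIL is still 0xffff underneath */
if (writeEUN == BLOCK_NIL)
	return -ENOSPC;				/* no erase unit could be found or folded */
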
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c23..607007d75b6f 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
281 if (wol->wolopts & WAKE_PHY) 281 if (wol->wolopts & WAKE_PHY)
282 adapter->wol |= AT_WUFC_LNKC; 282 adapter->wol |= AT_WUFC_LNKC;
283 283
284 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
285
284 return 0; 286 return 0;
285} 287}
286 288
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1aa..4003955d7a96 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
365 if (wol->wolopts & WAKE_PHY) 365 if (wol->wolopts & WAKE_PHY)
366 adapter->wol |= AT_WUFC_LNKC; 366 adapter->wol |= AT_WUFC_LNKC;
367 367
368 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
369
368 return 0; 370 return 0;
369} 371}
370 372
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f703758f0a6e..5b4bf3d2cdc2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
73#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 73#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
74 74
75#define BE_MAX_LRO_DESCRIPTORS 16 75#define BE_MAX_LRO_DESCRIPTORS 16
76#define BE_MAX_FRAGS_PER_FRAME 16 76#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
77 77
78struct be_dma_mem { 78struct be_dma_mem {
79 void *va; 79 void *va;
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8c..cccc5419ad72 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
162 return -EINVAL; 162 return -EINVAL;
163 163
164 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; 164 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
165 if (adapter->max_rx_coal > MAX_SKB_FRAGS) 165 if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
166 adapter->max_rx_coal = MAX_SKB_FRAGS - 1; 166 adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
167 167
168 /* if AIC is being turned on now, start with an EQD of 0 */ 168 /* if AIC is being turned on now, start with an EQD of 0 */
169 if (rx_eq->enable_aic == 0 && 169 if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66c10c87f517..308eb09ca56b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -666,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
666{ 666{
667 struct be_queue_info *rxq = &adapter->rx_obj.q; 667 struct be_queue_info *rxq = &adapter->rx_obj.q;
668 struct be_rx_page_info *page_info; 668 struct be_rx_page_info *page_info;
669 u16 rxq_idx, i, num_rcvd; 669 u16 rxq_idx, i, num_rcvd, j;
670 u32 pktsize, hdr_len, curr_frag_len; 670 u32 pktsize, hdr_len, curr_frag_len;
671 u8 *start; 671 u8 *start;
672 672
@@ -709,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
709 709
710 /* More frags present for this completion */ 710 /* More frags present for this completion */
711 pktsize -= curr_frag_len; /* account for above copied frag */ 711 pktsize -= curr_frag_len; /* account for above copied frag */
712 for (i = 1; i < num_rcvd; i++) { 712 for (i = 1, j = 0; i < num_rcvd; i++) {
713 index_inc(&rxq_idx, rxq->len); 713 index_inc(&rxq_idx, rxq->len);
714 page_info = get_rx_page_info(adapter, rxq_idx); 714 page_info = get_rx_page_info(adapter, rxq_idx);
715 715
716 curr_frag_len = min(pktsize, rx_frag_size); 716 curr_frag_len = min(pktsize, rx_frag_size);
717 717
718 skb_shinfo(skb)->frags[i].page = page_info->page; 718 /* Coalesce all frags from the same physical page in one slot */
719 skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; 719 if (page_info->page_offset == 0) {
720 skb_shinfo(skb)->frags[i].size = curr_frag_len; 720 /* Fresh page */
721 j++;
722 skb_shinfo(skb)->frags[j].page = page_info->page;
723 skb_shinfo(skb)->frags[j].page_offset =
724 page_info->page_offset;
725 skb_shinfo(skb)->frags[j].size = 0;
726 skb_shinfo(skb)->nr_frags++;
727 } else {
728 put_page(page_info->page);
729 }
730
731 skb_shinfo(skb)->frags[j].size += curr_frag_len;
721 skb->len += curr_frag_len; 732 skb->len += curr_frag_len;
722 skb->data_len += curr_frag_len; 733 skb->data_len += curr_frag_len;
723 skb_shinfo(skb)->nr_frags++;
724 pktsize -= curr_frag_len; 734 pktsize -= curr_frag_len;
725 735
726 memset(page_info, 0, sizeof(*page_info)); 736 memset(page_info, 0, sizeof(*page_info));
727 } 737 }
738 BUG_ON(j > MAX_SKB_FRAGS);
728 739
729done: 740done:
730 be_rx_stats_update(adapter, pktsize, num_rcvd); 741 be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
786 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; 797 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
787 struct be_queue_info *rxq = &adapter->rx_obj.q; 798 struct be_queue_info *rxq = &adapter->rx_obj.q;
788 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 799 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
789 u16 i, rxq_idx = 0, vid; 800 u16 i, rxq_idx = 0, vid, j;
790 801
791 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 802 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
792 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 803 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
794 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 805 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
795 806
796 remaining = pkt_size; 807 remaining = pkt_size;
797 for (i = 0; i < num_rcvd; i++) { 808 for (i = 0, j = -1; i < num_rcvd; i++) {
798 page_info = get_rx_page_info(adapter, rxq_idx); 809 page_info = get_rx_page_info(adapter, rxq_idx);
799 810
800 curr_frag_len = min(remaining, rx_frag_size); 811 curr_frag_len = min(remaining, rx_frag_size);
801 812
802 rx_frags[i].page = page_info->page; 813 /* Coalesce all frags from the same physical page in one slot */
803 rx_frags[i].page_offset = page_info->page_offset; 814 if (i == 0 || page_info->page_offset == 0) {
804 rx_frags[i].size = curr_frag_len; 815 /* First frag or Fresh page */
805 remaining -= curr_frag_len; 816 j++;
817 rx_frags[j].page = page_info->page;
818 rx_frags[j].page_offset = page_info->page_offset;
819 rx_frags[j].size = 0;
820 } else {
821 put_page(page_info->page);
822 }
823 rx_frags[j].size += curr_frag_len;
806 824
825 remaining -= curr_frag_len;
807 index_inc(&rxq_idx, rxq->len); 826 index_inc(&rxq_idx, rxq->len);
808
809 memset(page_info, 0, sizeof(*page_info)); 827 memset(page_info, 0, sizeof(*page_info));
810 } 828 }
829 BUG_ON(j > MAX_SKB_FRAGS);
811 830
812 if (likely(!vlanf)) { 831 if (likely(!vlanf)) {
813 lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, 832 lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
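
Both receive paths above now merge consecutive completion fragments that land in the same physical page into a single skb/LRO frag slot: only a fragment that starts a fresh page (page_offset == 0) opens a new slot, otherwise the surplus page reference is dropped and the length is folded into the current slot. This keeps the slot count within MAX_SKB_FRAGS even when rx_frag_size is smaller than a page. The decision, isolated from the surrounding bookkeeping (names follow the skb hunk):

if (page_info->page_offset == 0) {
	/* fresh page: open a new frag slot */
	j++;
	skb_shinfo(skb)->frags[j].page = page_info->page;
	skb_shinfo(skb)->frags[j].page_offset = page_info->page_offset;
	skb_shinfo(skb)->frags[j].size = 0;
	skb_shinfo(skb)->nr_frags++;
} else {
	/* same page as the previous fragment: merge into it */
	put_page(page_info->page);
}
skb_shinfo(skb)->frags[j].size += curr_frag_len;
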
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1c..951714a7f90a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
8637 return 0; 8637 return 0;
8638} 8638}
8639 8639
8640static u32
8641bnx2x_get_link(struct net_device *dev)
8642{
8643 struct bnx2x *bp = netdev_priv(dev);
8644
8645 return bp->link_vars.link_up;
8646}
8647
8640static int bnx2x_get_eeprom_len(struct net_device *dev) 8648static int bnx2x_get_eeprom_len(struct net_device *dev)
8641{ 8649{
8642 struct bnx2x *bp = netdev_priv(dev); 8650 struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
10034 .get_msglevel = bnx2x_get_msglevel, 10042 .get_msglevel = bnx2x_get_msglevel,
10035 .set_msglevel = bnx2x_set_msglevel, 10043 .set_msglevel = bnx2x_set_msglevel,
10036 .nway_reset = bnx2x_nway_reset, 10044 .nway_reset = bnx2x_nway_reset,
10037 .get_link = ethtool_op_get_link, 10045 .get_link = bnx2x_get_link,
10038 .get_eeprom_len = bnx2x_get_eeprom_len, 10046 .get_eeprom_len = bnx2x_get_eeprom_len,
10039 .get_eeprom = bnx2x_get_eeprom, 10047 .get_eeprom = bnx2x_get_eeprom,
10040 .set_eeprom = bnx2x_set_eeprom, 10048 .set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9c..fd5e32cbcb87 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
1097 .ndo_start_xmit = cpmac_start_xmit, 1097 .ndo_start_xmit = cpmac_start_xmit,
1098 .ndo_tx_timeout = cpmac_tx_timeout, 1098 .ndo_tx_timeout = cpmac_tx_timeout,
1099 .ndo_set_multicast_list = cpmac_set_multicast_list, 1099 .ndo_set_multicast_list = cpmac_set_multicast_list,
1100 .ndo_so_ioctl = cpmac_ioctl, 1100 .ndo_do_ioctl = cpmac_ioctl,
1101 .ndo_set_config = cpmac_config, 1101 .ndo_set_config = cpmac_config,
1102 .ndo_change_mtu = eth_change_mtu, 1102 .ndo_change_mtu = eth_change_mtu,
1103 .ndo_validate_addr = eth_validate_addr, 1103 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5a..5b8cbdb4b520 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2185 /* Free all the Rx ring sk_buffs */ 2185 /* Free all the Rx ring sk_buffs */
2186 for (i = 0; i < rx_ring->count; i++) { 2186 for (i = 0; i < rx_ring->count; i++) {
2187 buffer_info = &rx_ring->buffer_info[i]; 2187 buffer_info = &rx_ring->buffer_info[i];
2188 if (buffer_info->skb) { 2188 if (buffer_info->dma) {
2189 pci_unmap_single(pdev, 2189 pci_unmap_single(pdev,
2190 buffer_info->dma, 2190 buffer_info->dma,
2191 buffer_info->length, 2191 buffer_info->length,
2192 PCI_DMA_FROMDEVICE); 2192 PCI_DMA_FROMDEVICE);
2193 }
2194
2195 buffer_info->dma = 0;
2193 2196
2197 if (buffer_info->skb) {
2194 dev_kfree_skb(buffer_info->skb); 2198 dev_kfree_skb(buffer_info->skb);
2195 buffer_info->skb = NULL; 2199 buffer_info->skb = NULL;
2196 } 2200 }
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4033 buffer_info->dma, 4037 buffer_info->dma,
4034 buffer_info->length, 4038 buffer_info->length,
4035 PCI_DMA_FROMDEVICE); 4039 PCI_DMA_FROMDEVICE);
4040 buffer_info->dma = 0;
4036 4041
4037 length = le16_to_cpu(rx_desc->length); 4042 length = le16_to_cpu(rx_desc->length);
4038 /* !EOP means multiple descriptors were used to store a single 4043 /* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
4222 pci_unmap_single(pdev, buffer_info->dma, 4227 pci_unmap_single(pdev, buffer_info->dma,
4223 adapter->rx_buffer_len, 4228 adapter->rx_buffer_len,
4224 PCI_DMA_FROMDEVICE); 4229 PCI_DMA_FROMDEVICE);
4230 buffer_info->dma = 0;
4225 4231
4226 break; /* while !buffer_info->skb */ 4232 break; /* while !buffer_info->skb */
4227 } 4233 }
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4817 4823
4818 netif_device_detach(netdev); 4824 netif_device_detach(netdev);
4819 4825
4826 if (state == pci_channel_io_perm_failure)
4827 return PCI_ERS_RESULT_DISCONNECT;
4828
4820 if (netif_running(netdev)) 4829 if (netif_running(netdev))
4821 e1000_down(adapter); 4830 e1000_down(adapter);
4822 pci_disable_device(pdev); 4831 pci_disable_device(pdev);
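
The same three-line guard is added to e1000e and igb below: when the PCI channel state reports a permanent failure the device is gone for good, so the error handler detaches the interface and tells the AER core to disconnect instead of attempting the usual down/disable/reset sequence. A sketch of the resulting callback shape; the teardown helper is a stand-in for the per-driver *_down() routine:

#include <linux/pci.h>
#include <linux/netdevice.h>

static void example_down(struct net_device *netdev);	/* stand-in for e1000_down() etc. */

static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* nothing left to reset */

	if (netif_running(netdev))
		example_down(netdev);			/* driver-specific teardown */
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;		/* ask the core for a slot reset */
}
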
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b4..63415bb6f48f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4785 4785
4786 netif_device_detach(netdev); 4786 netif_device_detach(netdev);
4787 4787
4788 if (state == pci_channel_io_perm_failure)
4789 return PCI_ERS_RESULT_DISCONNECT;
4790
4788 if (netif_running(netdev)) 4791 if (netif_running(netdev))
4789 e1000e_down(adapter); 4792 e1000e_down(adapter);
4790 pci_disable_device(pdev); 4793 pci_disable_device(pdev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca2..d167090248e2 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
188} 188}
189 189
190 190
191#ifdef CONFIG_GIANFAR 191#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) 192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
193{ 193{
194 struct gfar __iomem *enet_regs; 194 struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
206#endif 206#endif
207 207
208 208
209#ifdef CONFIG_UCC_GETH 209#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
210static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 210static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
211{ 211{
212 struct device_node *np = NULL; 212 struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
292 of_device_is_compatible(np, "fsl,gianfar-tbi") || 292 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
293 of_device_is_compatible(np, "gianfar")) { 293 of_device_is_compatible(np, "gianfar")) {
294#ifdef CONFIG_GIANFAR 294#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
295 tbipa = get_gfar_tbipa(regs); 295 tbipa = get_gfar_tbipa(regs);
296#else 296#else
297 err = -ENODEV; 297 err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
299#endif 299#endif
300 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || 300 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
301 of_device_is_compatible(np, "ucc_geth_phy")) { 301 of_device_is_compatible(np, "ucc_geth_phy")) {
302#ifdef CONFIG_UCC_GETH 302#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
303 u32 id; 303 u32 id;
304 static u32 mii_mng_master; 304 static u32 mii_mng_master;
305 305
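Note: the fsl_pq_mdio change above works around a Kconfig subtlety. For a tristate symbol FOO, the preprocessor sees CONFIG_FOO only for =y builds and CONFIG_FOO_MODULE for =m builds, so a bare #ifdef CONFIG_FOO silently compiles the support out when the other driver is built as a module. A generic sketch, with FOO and foo_do_something() as placeholders:

#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
        /* reached whether FOO is built in (=y) or built as a module (=m) */
        err = foo_do_something();
#else
        err = -ENODEV;                  /* FOO support not built at all */
#endif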
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624aa..be480292aba1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4549 cleaned = true; 4549 cleaned = true;
4550 cleaned_count++; 4550 cleaned_count++;
4551 4551
4552 /* this is the fast path for the non-packet split case */
4552 if (!adapter->rx_ps_hdr_size) { 4553 if (!adapter->rx_ps_hdr_size) {
4553 pci_unmap_single(pdev, buffer_info->dma, 4554 pci_unmap_single(pdev, buffer_info->dma,
4554 adapter->rx_buffer_len + 4555 adapter->rx_buffer_len,
4555 NET_IP_ALIGN,
4556 PCI_DMA_FROMDEVICE); 4556 PCI_DMA_FROMDEVICE);
4557 buffer_info->dma = 0;
4557 skb_put(skb, length); 4558 skb_put(skb, length);
4558 goto send_up; 4559 goto send_up;
4559 } 4560 }
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4570 4571
4571 if (!skb_shinfo(skb)->nr_frags) { 4572 if (!skb_shinfo(skb)->nr_frags) {
4572 pci_unmap_single(pdev, buffer_info->dma, 4573 pci_unmap_single(pdev, buffer_info->dma,
4573 adapter->rx_ps_hdr_size + NET_IP_ALIGN, 4574 adapter->rx_ps_hdr_size,
4574 PCI_DMA_FROMDEVICE); 4575 PCI_DMA_FROMDEVICE);
4576 buffer_info->dma = 0;
4575 skb_put(skb, hlen); 4577 skb_put(skb, hlen);
4576 } 4578 }
4577 4579
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4713 bufsz = adapter->rx_ps_hdr_size; 4715 bufsz = adapter->rx_ps_hdr_size;
4714 else 4716 else
4715 bufsz = adapter->rx_buffer_len; 4717 bufsz = adapter->rx_buffer_len;
4716 bufsz += NET_IP_ALIGN;
4717 4718
4718 while (cleaned_count--) { 4719 while (cleaned_count--) {
4719 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 4720 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4737 } 4738 }
4738 4739
4739 if (!buffer_info->skb) { 4740 if (!buffer_info->skb) {
4740 skb = netdev_alloc_skb(netdev, bufsz); 4741 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
4741 if (!skb) { 4742 if (!skb) {
4742 adapter->alloc_rx_buff_failed++; 4743 adapter->alloc_rx_buff_failed++;
4743 goto no_buffers; 4744 goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5338 5339
5339 netif_device_detach(netdev); 5340 netif_device_detach(netdev);
5340 5341
5342 if (state == pci_channel_io_perm_failure)
5343 return PCI_ERS_RESULT_DISCONNECT;
5344
5341 if (netif_running(netdev)) 5345 if (netif_running(netdev))
5342 igb_down(adapter); 5346 igb_down(adapter);
5343 pci_disable_device(pdev); 5347 pci_disable_device(pdev);
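Note: the igb receive-path hunks above (and the matching ixgbe_main change below) stop folding NET_IP_ALIGN into the DMA mapping length. The extra bytes exist only so skb_reserve() can keep the IP header from being misaligned behind the 14-byte Ethernet header; the NIC itself never writes more than the buffer length. A condensed sketch of the resulting convention, where bufsz and buffer_info stand in for the driver's own bookkeeping:

skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb)
        goto no_buffers;                        /* driver-specific error path */

/* Shift skb->data by NET_IP_ALIGN; the DMA mapping below then covers
 * only the bufsz bytes the hardware may actually write. */
skb_reserve(skb, NET_IP_ALIGN);
buffer_info->dma = pci_map_single(pdev, skb->data, bufsz,
                                  PCI_DMA_FROMDEVICE);

/* ... and on completion, unmap with the same length and clear the handle */
pci_unmap_single(pdev, buffer_info->dma, bufsz, PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;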
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba5..911c082cee5a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
677 return 0; 677 return 0;
678} 678}
679 679
680static const struct net_device_ops bfin_sir_ndo = {
681 .ndo_open = bfin_sir_open,
682 .ndo_stop = bfin_sir_stop,
683 .ndo_start_xmit = bfin_sir_hard_xmit,
684 .ndo_do_ioctl = bfin_sir_ioctl,
685 .ndo_get_stats = bfin_sir_stats,
686};
687
680static int __devinit bfin_sir_probe(struct platform_device *pdev) 688static int __devinit bfin_sir_probe(struct platform_device *pdev)
681{ 689{
682 struct net_device *dev; 690 struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
718 if (err) 726 if (err)
719 goto err_mem_3; 727 goto err_mem_3;
720 728
721 dev->hard_start_xmit = bfin_sir_hard_xmit; 729 dev->netdev_ops = &bfin_sir_ndo;
722 dev->open = bfin_sir_open; 730 dev->irq = sir_port->irq;
723 dev->stop = bfin_sir_stop;
724 dev->do_ioctl = bfin_sir_ioctl;
725 dev->get_stats = bfin_sir_stats;
726 dev->irq = sir_port->irq;
727 731
728 irda_init_max_qos_capabilies(&self->qos); 732 irda_init_max_qos_capabilies(&self->qos);
729 733
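Note: the bfin_sir hunks above are a mechanical conversion from the old per-field net_device hooks to a const struct net_device_ops. The same shape applies to any driver; the foo_* handlers below are illustrative.

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_stop,
        .ndo_start_xmit = foo_hard_xmit,
        .ndo_do_ioctl   = foo_ioctl,
        .ndo_get_stats  = foo_get_stats,
};

/* in probe(), instead of assigning dev->open, dev->stop, ... one by one: */
dev->netdev_ops = &foo_netdev_ops;

Keeping the ops table const and static lets every device instance share one read-only structure.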
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f27..0f7b6a3a2e68 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
139 ecmd->autoneg = AUTONEG_ENABLE; 139 ecmd->autoneg = AUTONEG_ENABLE;
140 ecmd->transceiver = XCVR_EXTERNAL; 140 ecmd->transceiver = XCVR_EXTERNAL;
141 if ((hw->phy.media_type == ixgbe_media_type_copper) || 141 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
142 (hw->mac.type == ixgbe_mac_82599EB)) { 142 (hw->phy.multispeed_fiber)) {
143 ecmd->supported |= (SUPPORTED_1000baseT_Full | 143 ecmd->supported |= (SUPPORTED_1000baseT_Full |
144 SUPPORTED_Autoneg); 144 SUPPORTED_Autoneg);
145 145
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
217 s32 err = 0; 217 s32 err = 0;
218 218
219 if ((hw->phy.media_type == ixgbe_media_type_copper) || 219 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
220 (hw->mac.type == ixgbe_mac_82599EB)) { 220 (hw->phy.multispeed_fiber)) {
221 /* 10000/copper and 1000/copper must autoneg 221 /* 10000/copper and 1000/copper must autoneg
222 * this function does not support any duplex forcing, but can 222 * this function does not support any duplex forcing, but can
223 * limit the advertising of the adapter to only 10000 or 1000 */ 223 * limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
245 } else { 245 } else {
246 /* in this case we currently only support 10Gb/FULL */ 246 /* in this case we currently only support 10Gb/FULL */
247 if ((ecmd->autoneg == AUTONEG_ENABLE) || 247 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
248 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
248 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 249 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
249 return -EINVAL; 250 return -EINVAL;
250 } 251 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db32..5588ef493a3d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
563 union ixgbe_adv_rx_desc *rx_desc; 563 union ixgbe_adv_rx_desc *rx_desc;
564 struct ixgbe_rx_buffer *bi; 564 struct ixgbe_rx_buffer *bi;
565 unsigned int i; 565 unsigned int i;
566 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
567 566
568 i = rx_ring->next_to_use; 567 i = rx_ring->next_to_use;
569 bi = &rx_ring->rx_buffer_info[i]; 568 bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
593 592
594 if (!bi->skb) { 593 if (!bi->skb) {
595 struct sk_buff *skb; 594 struct sk_buff *skb;
596 skb = netdev_alloc_skb(adapter->netdev, bufsz); 595 skb = netdev_alloc_skb(adapter->netdev,
596 (rx_ring->rx_buf_len +
597 NET_IP_ALIGN));
597 598
598 if (!skb) { 599 if (!skb) {
599 adapter->alloc_rx_buff_failed++; 600 adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
608 skb_reserve(skb, NET_IP_ALIGN); 609 skb_reserve(skb, NET_IP_ALIGN);
609 610
610 bi->skb = skb; 611 bi->skb = skb;
611 bi->dma = pci_map_single(pdev, skb->data, bufsz, 612 bi->dma = pci_map_single(pdev, skb->data,
613 rx_ring->rx_buf_len,
612 PCI_DMA_FROMDEVICE); 614 PCI_DMA_FROMDEVICE);
613 } 615 }
614 /* Refresh the desc even if buffer_addrs didn't change because 616 /* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
732 pci_unmap_single(pdev, rx_buffer_info->dma, 734 pci_unmap_single(pdev, rx_buffer_info->dma,
733 rx_ring->rx_buf_len, 735 rx_ring->rx_buf_len,
734 PCI_DMA_FROMDEVICE); 736 PCI_DMA_FROMDEVICE);
737 rx_buffer_info->dma = 0;
735 skb_put(skb, len); 738 skb_put(skb, len);
736 } 739 }
737 740
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2701 */ 2704 */
2702 err = hw->phy.ops.identify(hw); 2705 err = hw->phy.ops.identify(hw);
2703 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2706 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2704 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 2707 dev_err(&adapter->pdev->dev, "failed to initialize because "
2708 "an unsupported SFP+ module type was detected.\n"
2709 "Reload the driver after installing a supported "
2710 "module.\n");
2705 ixgbe_down(adapter); 2711 ixgbe_down(adapter);
2706 return err; 2712 return err;
2707 } 2713 }
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2812 } 2818 }
2813 if (!rx_buffer_info->page) 2819 if (!rx_buffer_info->page)
2814 continue; 2820 continue;
2815 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, 2821 if (rx_buffer_info->page_dma) {
2816 PCI_DMA_FROMDEVICE); 2822 pci_unmap_page(pdev, rx_buffer_info->page_dma,
2817 rx_buffer_info->page_dma = 0; 2823 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2824 rx_buffer_info->page_dma = 0;
2825 }
2818 put_page(rx_buffer_info->page); 2826 put_page(rx_buffer_info->page);
2819 rx_buffer_info->page = NULL; 2827 rx_buffer_info->page = NULL;
2820 rx_buffer_info->page_offset = 0; 2828 rx_buffer_info->page_offset = 0;
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work)
3720 goto reschedule; 3728 goto reschedule;
3721 ret = hw->phy.ops.reset(hw); 3729 ret = hw->phy.ops.reset(hw);
3722 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3730 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3723 DPRINTK(PROBE, ERR, "failed to initialize because an " 3731 dev_err(&adapter->pdev->dev, "failed to initialize "
3724 "unsupported SFP+ module type was detected.\n" 3732 "because an unsupported SFP+ module type "
3725 "Reload the driver after installing a " 3733 "was detected.\n"
3726 "supported module.\n"); 3734 "Reload the driver after installing a "
3735 "supported module.\n");
3727 unregister_netdev(adapter->netdev); 3736 unregister_netdev(adapter->netdev);
3728 } else { 3737 } else {
3729 DPRINTK(PROBE, INFO, "detected SFP+: %d\n", 3738 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
4502 u32 autoneg; 4511 u32 autoneg;
4503 4512
4504 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 4513 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
4505 if (hw->mac.ops.get_link_capabilities) 4514 autoneg = hw->phy.autoneg_advertised;
4515 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4506 hw->mac.ops.get_link_capabilities(hw, &autoneg, 4516 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4507 &hw->mac.autoneg); 4517 &hw->mac.autoneg);
4508 if (hw->mac.ops.setup_link_speed) 4518 if (hw->mac.ops.setup_link_speed)
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
4526 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 4536 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
4527 err = hw->phy.ops.identify_sfp(hw); 4537 err = hw->phy.ops.identify_sfp(hw);
4528 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4538 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4529 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 4539 dev_err(&adapter->pdev->dev, "failed to initialize because "
4540 "an unsupported SFP+ module type was detected.\n"
4541 "Reload the driver after installing a supported "
4542 "module.\n");
4530 ixgbe_down(adapter); 4543 ixgbe_down(adapter);
4531 return; 4544 return;
4532 } 4545 }
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5513 round_jiffies(jiffies + (2 * HZ))); 5526 round_jiffies(jiffies + (2 * HZ)));
5514 err = 0; 5527 err = 0;
5515 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 5528 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5516 dev_err(&adapter->pdev->dev, "failed to load because an " 5529 dev_err(&adapter->pdev->dev, "failed to initialize because "
5517 "unsupported SFP+ module type was detected.\n"); 5530 "an unsupported SFP+ module type was detected.\n"
5531 "Reload the driver after installing a supported "
5532 "module.\n");
5518 goto err_sw_init; 5533 goto err_sw_init;
5519 } else if (err) { 5534 } else if (err) {
5520 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 5535 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c35..6851bdb2ce29 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
14#include <linux/mdio.h> 14#include <linux/mdio.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16
17MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
18MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
19MODULE_LICENSE("GPL");
20
17/** 21/**
18 * mdio45_probe - probe for an MDIO (clause 45) device 22 * mdio45_probe - probe for an MDIO (clause 45) device
19 * @mdio: MDIO interface 23 * @mdio: MDIO interface
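Note: the mdio.c hunk only declares module metadata so modinfo and the license machinery have something to report. Generically:

MODULE_DESCRIPTION("One-line summary of what the module does");
MODULE_AUTHOR("Name <address>");
MODULE_LICENSE("GPL");  /* a GPL-compatible string avoids tainting and permits GPL-only symbols */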
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f3..a2d82ddb3b4d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
865 struct sh_eth_private *mdp = netdev_priv(ndev); 865 struct sh_eth_private *mdp = netdev_priv(ndev);
866 struct sh_eth_cpu_data *cd = mdp->cd; 866 struct sh_eth_cpu_data *cd = mdp->cd;
867 irqreturn_t ret = IRQ_NONE; 867 irqreturn_t ret = IRQ_NONE;
868 u32 ioaddr, boguscnt = RX_RING_SIZE; 868 u32 ioaddr, intr_status = 0;
869 u32 intr_status = 0;
870 869
871 ioaddr = ndev->base_addr; 870 ioaddr = ndev->base_addr;
872 spin_lock(&mdp->lock); 871 spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
901 if (intr_status & cd->eesr_err_check) 900 if (intr_status & cd->eesr_err_check)
902 sh_eth_error(ndev, intr_status); 901 sh_eth_error(ndev, intr_status);
903 902
904 if (--boguscnt < 0) {
905 printk(KERN_WARNING
906 "%s: Too much work at interrupt, status=0x%4.4x.\n",
907 ndev->name, intr_status);
908 }
909
910other_irq: 903other_irq:
911 spin_unlock(&mdp->lock); 904 spin_unlock(&mdp->lock);
912 905
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d7..daf961ab68bc 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2495 if (likely(status >> 16 == (status & 0xffff))) { 2495 if (likely(status >> 16 == (status & 0xffff))) {
2496 skb = sky2->rx_ring[sky2->rx_next].skb; 2496 skb = sky2->rx_ring[sky2->rx_next].skb;
2497 skb->ip_summed = CHECKSUM_COMPLETE; 2497 skb->ip_summed = CHECKSUM_COMPLETE;
2498 skb->csum = status & 0xffff; 2498 skb->csum = le16_to_cpu(status);
2499 } else { 2499 } else {
2500 printk(KERN_NOTICE PFX "%s: hardware receive " 2500 printk(KERN_NOTICE PFX "%s: hardware receive "
2501 "checksum problem (status = %#x)\n", 2501 "checksum problem (status = %#x)\n",
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3b..cd35d50e46d4 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
319 return crc == crc2; 319 return crc == crc2;
320 320
321 if (unlikely(crc != crc2)) { 321 if (unlikely(crc != crc2)) {
322 dev->stats.rx_errors++; 322 dev->net->stats.rx_errors++;
323 dev_kfree_skb_any(skb2); 323 dev_kfree_skb_any(skb2);
324 } else 324 } else
325 usbnet_skb_return(dev, skb2); 325 usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93a..1d3730d6690f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
513 len = (skb->data[1] | (skb->data[2] << 8)) - 4; 513 len = (skb->data[1] | (skb->data[2] << 8)) - 4;
514 514
515 if (unlikely(status & 0xbf)) { 515 if (unlikely(status & 0xbf)) {
516 if (status & 0x01) dev->stats.rx_fifo_errors++; 516 if (status & 0x01) dev->net->stats.rx_fifo_errors++;
517 if (status & 0x02) dev->stats.rx_crc_errors++; 517 if (status & 0x02) dev->net->stats.rx_crc_errors++;
518 if (status & 0x04) dev->stats.rx_frame_errors++; 518 if (status & 0x04) dev->net->stats.rx_frame_errors++;
519 if (status & 0x20) dev->stats.rx_missed_errors++; 519 if (status & 0x20) dev->net->stats.rx_missed_errors++;
520 if (status & 0x90) dev->stats.rx_length_errors++; 520 if (status & 0x90) dev->net->stats.rx_length_errors++;
521 return 0; 521 return 0;
522 } 522 }
523 523
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6b..aeb1ab03a9ee 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
433 dbg("rx framesize %d range %d..%d mtu %d", skb->len, 433 dbg("rx framesize %d range %d..%d mtu %d", skb->len,
434 net->hard_header_len, dev->hard_mtu, net->mtu); 434 net->hard_header_len, dev->hard_mtu, net->mtu);
435#endif 435#endif
436 dev->stats.rx_frame_errors++; 436 dev->net->stats.rx_frame_errors++;
437 nc_ensure_sync(dev); 437 nc_ensure_sync(dev);
438 return 0; 438 return 0;
439 } 439 }
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
442 hdr_len = le16_to_cpup(&header->hdr_len); 442 hdr_len = le16_to_cpup(&header->hdr_len);
443 packet_len = le16_to_cpup(&header->packet_len); 443 packet_len = le16_to_cpup(&header->packet_len);
444 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { 444 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
445 dev->stats.rx_frame_errors++; 445 dev->net->stats.rx_frame_errors++;
446 dbg("packet too big, %d", packet_len); 446 dbg("packet too big, %d", packet_len);
447 nc_ensure_sync(dev); 447 nc_ensure_sync(dev);
448 return 0; 448 return 0;
449 } else if (hdr_len < MIN_HEADER) { 449 } else if (hdr_len < MIN_HEADER) {
450 dev->stats.rx_frame_errors++; 450 dev->net->stats.rx_frame_errors++;
451 dbg("header too short, %d", hdr_len); 451 dbg("header too short, %d", hdr_len);
452 nc_ensure_sync(dev); 452 nc_ensure_sync(dev);
453 return 0; 453 return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
465 465
466 if ((packet_len & 0x01) == 0) { 466 if ((packet_len & 0x01) == 0) {
467 if (skb->data [packet_len] != PAD_BYTE) { 467 if (skb->data [packet_len] != PAD_BYTE) {
468 dev->stats.rx_frame_errors++; 468 dev->net->stats.rx_frame_errors++;
469 dbg("bad pad"); 469 dbg("bad pad");
470 return 0; 470 return 0;
471 } 471 }
472 skb_trim(skb, skb->len - 1); 472 skb_trim(skb, skb->len - 1);
473 } 473 }
474 if (skb->len != packet_len) { 474 if (skb->len != packet_len) {
475 dev->stats.rx_frame_errors++; 475 dev->net->stats.rx_frame_errors++;
476 dbg("bad packet len %d (expected %d)", 476 dbg("bad packet len %d (expected %d)",
477 skb->len, packet_len); 477 skb->len, packet_len);
478 nc_ensure_sync(dev); 478 nc_ensure_sync(dev);
479 return 0; 479 return 0;
480 } 480 }
481 if (header->packet_id != get_unaligned(&trailer->packet_id)) { 481 if (header->packet_id != get_unaligned(&trailer->packet_id)) {
482 dev->stats.rx_fifo_errors++; 482 dev->net->stats.rx_fifo_errors++;
483 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", 483 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
484 le16_to_cpu(header->packet_id), 484 le16_to_cpu(header->packet_id),
485 le16_to_cpu(trailer->packet_id)); 485 le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950e..2232232b7989 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
487 if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET 487 if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
488 || skb->len < msg_len 488 || skb->len < msg_len
489 || (data_offset + data_len + 8) > msg_len)) { 489 || (data_offset + data_len + 8) > msg_len)) {
490 dev->stats.rx_frame_errors++; 490 dev->net->stats.rx_frame_errors++;
491 devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", 491 devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
492 le32_to_cpu(hdr->msg_type), 492 le32_to_cpu(hdr->msg_type),
493 msg_len, data_offset, data_len, skb->len); 493 msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22de..fe045896406b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1108 if (unlikely(header & RX_STS_ES_)) { 1108 if (unlikely(header & RX_STS_ES_)) {
1109 if (netif_msg_rx_err(dev)) 1109 if (netif_msg_rx_err(dev))
1110 devdbg(dev, "Error header=0x%08x", header); 1110 devdbg(dev, "Error header=0x%08x", header);
1111 dev->stats.rx_errors++; 1111 dev->net->stats.rx_errors++;
1112 dev->stats.rx_dropped++; 1112 dev->net->stats.rx_dropped++;
1113 1113
1114 if (header & RX_STS_CRC_) { 1114 if (header & RX_STS_CRC_) {
1115 dev->stats.rx_crc_errors++; 1115 dev->net->stats.rx_crc_errors++;
1116 } else { 1116 } else {
1117 if (header & (RX_STS_TL_ | RX_STS_RF_)) 1117 if (header & (RX_STS_TL_ | RX_STS_RF_))
1118 dev->stats.rx_frame_errors++; 1118 dev->net->stats.rx_frame_errors++;
1119 1119
1120 if ((header & RX_STS_LE_) && 1120 if ((header & RX_STS_LE_) &&
1121 (!(header & RX_STS_FT_))) 1121 (!(header & RX_STS_FT_)))
1122 dev->stats.rx_length_errors++; 1122 dev->net->stats.rx_length_errors++;
1123 } 1123 }
1124 } else { 1124 } else {
1125 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 1125 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a0319..edfd9e10ceba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
234 int status; 234 int status;
235 235
236 skb->protocol = eth_type_trans (skb, dev->net); 236 skb->protocol = eth_type_trans (skb, dev->net);
237 dev->stats.rx_packets++; 237 dev->net->stats.rx_packets++;
238 dev->stats.rx_bytes += skb->len; 238 dev->net->stats.rx_bytes += skb->len;
239 239
240 if (netif_msg_rx_status (dev)) 240 if (netif_msg_rx_status (dev))
241 devdbg (dev, "< rx, len %zu, type 0x%x", 241 devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
397 if (netif_msg_rx_err (dev)) 397 if (netif_msg_rx_err (dev))
398 devdbg (dev, "drop"); 398 devdbg (dev, "drop");
399error: 399error:
400 dev->stats.rx_errors++; 400 dev->net->stats.rx_errors++;
401 skb_queue_tail (&dev->done, skb); 401 skb_queue_tail (&dev->done, skb);
402 } 402 }
403} 403}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
420 case 0: 420 case 0:
421 if (skb->len < dev->net->hard_header_len) { 421 if (skb->len < dev->net->hard_header_len) {
422 entry->state = rx_cleanup; 422 entry->state = rx_cleanup;
423 dev->stats.rx_errors++; 423 dev->net->stats.rx_errors++;
424 dev->stats.rx_length_errors++; 424 dev->net->stats.rx_length_errors++;
425 if (netif_msg_rx_err (dev)) 425 if (netif_msg_rx_err (dev))
426 devdbg (dev, "rx length %d", skb->len); 426 devdbg (dev, "rx length %d", skb->len);
427 } 427 }
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
433 * storm, recovering as needed. 433 * storm, recovering as needed.
434 */ 434 */
435 case -EPIPE: 435 case -EPIPE:
436 dev->stats.rx_errors++; 436 dev->net->stats.rx_errors++;
437 usbnet_defer_kevent (dev, EVENT_RX_HALT); 437 usbnet_defer_kevent (dev, EVENT_RX_HALT);
438 // FALLTHROUGH 438 // FALLTHROUGH
439 439
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
451 case -EPROTO: 451 case -EPROTO:
452 case -ETIME: 452 case -ETIME:
453 case -EILSEQ: 453 case -EILSEQ:
454 dev->stats.rx_errors++; 454 dev->net->stats.rx_errors++;
455 if (!timer_pending (&dev->delay)) { 455 if (!timer_pending (&dev->delay)) {
456 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 456 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
457 if (netif_msg_link (dev)) 457 if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
465 465
466 /* data overrun ... flush fifo? */ 466 /* data overrun ... flush fifo? */
467 case -EOVERFLOW: 467 case -EOVERFLOW:
468 dev->stats.rx_over_errors++; 468 dev->net->stats.rx_over_errors++;
469 // FALLTHROUGH 469 // FALLTHROUGH
470 470
471 default: 471 default:
472 entry->state = rx_cleanup; 472 entry->state = rx_cleanup;
473 dev->stats.rx_errors++; 473 dev->net->stats.rx_errors++;
474 if (netif_msg_rx_err (dev)) 474 if (netif_msg_rx_err (dev))
475 devdbg (dev, "rx status %d", urb_status); 475 devdbg (dev, "rx status %d", urb_status);
476 break; 476 break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
583 583
584 if (netif_msg_ifdown (dev)) 584 if (netif_msg_ifdown (dev))
585 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", 585 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
586 dev->stats.rx_packets, dev->stats.tx_packets, 586 net->stats.rx_packets, net->stats.tx_packets,
587 dev->stats.rx_errors, dev->stats.tx_errors 587 net->stats.rx_errors, net->stats.tx_errors
588 ); 588 );
589 589
590 // ensure there are no more active urbs 590 // ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
891 struct usbnet *dev = entry->dev; 891 struct usbnet *dev = entry->dev;
892 892
893 if (urb->status == 0) { 893 if (urb->status == 0) {
894 dev->stats.tx_packets++; 894 dev->net->stats.tx_packets++;
895 dev->stats.tx_bytes += entry->length; 895 dev->net->stats.tx_bytes += entry->length;
896 } else { 896 } else {
897 dev->stats.tx_errors++; 897 dev->net->stats.tx_errors++;
898 898
899 switch (urb->status) { 899 switch (urb->status) {
900 case -EPIPE: 900 case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
1020 devdbg (dev, "drop, code %d", retval); 1020 devdbg (dev, "drop, code %d", retval);
1021drop: 1021drop:
1022 retval = NET_XMIT_SUCCESS; 1022 retval = NET_XMIT_SUCCESS;
1023 dev->stats.tx_dropped++; 1023 dev->net->stats.tx_dropped++;
1024 if (skb) 1024 if (skb)
1025 dev_kfree_skb_any (skb); 1025 dev_kfree_skb_any (skb);
1026 usb_free_urb (urb); 1026 usb_free_urb (urb);
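Note: all of the USB networking hunks above (cdc_eem, dm9601, net1080, rndis_host, smsc95xx and usbnet itself) move error accounting from usbnet's private counters onto the standard statistics embedded in struct net_device, reachable as dev->net->stats from a struct usbnet. A hedged sketch of what a minidriver rx_fixup() hook looks like afterwards; the foo_* name and the 4-byte trailer are invented for illustration:

static int foo_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
        if (skb->len < 4) {                     /* too short for our (invented) framing */
                dev->net->stats.rx_errors++;
                dev->net->stats.rx_length_errors++;
                return 0;                       /* 0 = tell the usbnet core to drop it */
        }

        skb_trim(skb, skb->len - 4);            /* strip the invented trailer */
        return 1;                               /* 1 = hand the skb back to the core */
}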
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c788..1097c72e44d5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
208 208
209static struct net_device_stats *veth_get_stats(struct net_device *dev) 209static struct net_device_stats *veth_get_stats(struct net_device *dev)
210{ 210{
211 struct veth_priv *priv = netdev_priv(dev); 211 struct veth_priv *priv;
212 struct net_device_stats *dev_stats = &dev->stats; 212 struct net_device_stats *dev_stats;
213 unsigned int cpu; 213 int cpu;
214 struct veth_net_stats *stats; 214 struct veth_net_stats *stats;
215 215
216 priv = netdev_priv(dev);
217 dev_stats = &dev->stats;
218
216 dev_stats->rx_packets = 0; 219 dev_stats->rx_packets = 0;
217 dev_stats->tx_packets = 0; 220 dev_stats->tx_packets = 0;
218 dev_stats->rx_bytes = 0; 221 dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
220 dev_stats->tx_dropped = 0; 223 dev_stats->tx_dropped = 0;
221 dev_stats->rx_dropped = 0; 224 dev_stats->rx_dropped = 0;
222 225
223 if (priv->stats) 226 for_each_online_cpu(cpu) {
224 for_each_online_cpu(cpu) { 227 stats = per_cpu_ptr(priv->stats, cpu);
225 stats = per_cpu_ptr(priv->stats, cpu);
226 228
227 dev_stats->rx_packets += stats->rx_packets; 229 dev_stats->rx_packets += stats->rx_packets;
228 dev_stats->tx_packets += stats->tx_packets; 230 dev_stats->tx_packets += stats->tx_packets;
229 dev_stats->rx_bytes += stats->rx_bytes; 231 dev_stats->rx_bytes += stats->rx_bytes;
230 dev_stats->tx_bytes += stats->tx_bytes; 232 dev_stats->tx_bytes += stats->tx_bytes;
231 dev_stats->tx_dropped += stats->tx_dropped; 233 dev_stats->tx_dropped += stats->tx_dropped;
232 dev_stats->rx_dropped += stats->rx_dropped; 234 dev_stats->rx_dropped += stats->rx_dropped;
233 } 235 }
234 236
235 return dev_stats; 237 return dev_stats;
236} 238}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
257 netif_carrier_off(dev); 259 netif_carrier_off(dev);
258 netif_carrier_off(priv->peer); 260 netif_carrier_off(priv->peer);
259 261
260 free_percpu(priv->stats);
261 priv->stats = NULL;
262 return 0; 262 return 0;
263} 263}
264 264
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
289 return 0; 289 return 0;
290} 290}
291 291
292static void veth_dev_free(struct net_device *dev)
293{
294 struct veth_priv *priv;
295
296 priv = netdev_priv(dev);
297 free_percpu(priv->stats);
298 free_netdev(dev);
299}
300
292static const struct net_device_ops veth_netdev_ops = { 301static const struct net_device_ops veth_netdev_ops = {
293 .ndo_init = veth_dev_init, 302 .ndo_init = veth_dev_init,
294 .ndo_open = veth_open, 303 .ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
306 dev->netdev_ops = &veth_netdev_ops; 315 dev->netdev_ops = &veth_netdev_ops;
307 dev->ethtool_ops = &veth_ethtool_ops; 316 dev->ethtool_ops = &veth_ethtool_ops;
308 dev->features |= NETIF_F_LLTX; 317 dev->features |= NETIF_F_LLTX;
309 dev->destructor = free_netdev; 318 dev->destructor = veth_dev_free;
310} 319}
311 320
312/* 321/*
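Note: the veth change moves freeing of the per-cpu stats block from close() into a netdev destructor, so the counters stay valid for the whole lifetime of the device and get_stats() can sum them without the old NULL check. The generic shape of that per-cpu counter scheme is sketched below; only the kernel per-cpu API is real, the foo_* names are illustrative fragments, not a complete driver.

struct foo_pcpu_stats {
        unsigned long rx_packets;
        unsigned long rx_bytes;
};

/* ndo_init: one counter block per possible CPU */
priv->stats = alloc_percpu(struct foo_pcpu_stats);

/* hot path: bump only this CPU's copy, no locking needed */
struct foo_pcpu_stats *s = per_cpu_ptr(priv->stats, smp_processor_id());
s->rx_packets++;
s->rx_bytes += skb->len;

/* ndo_get_stats: fold every CPU's copy into the net_device_stats snapshot */
for_each_online_cpu(cpu) {
        s = per_cpu_ptr(priv->stats, cpu);
        dev->stats.rx_packets += s->rx_packets;
        dev->stats.rx_bytes   += s->rx_bytes;
}

/* destructor (not close()): the block must outlive any reader of the stats */
free_percpu(priv->stats);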
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd42..2597145a066e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
2907 netmos_9755, 2907 netmos_9755,
2908 netmos_9805, 2908 netmos_9805,
2909 netmos_9815, 2909 netmos_9815,
2910 netmos_9901,
2910 quatech_sppxp100, 2911 quatech_sppxp100,
2911}; 2912};
2912 2913
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
2987 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, 2988 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
2988 /* netmos_9805 */ { 1, { { 0, -1 }, } }, 2989 /* netmos_9805 */ { 1, { { 0, -1 }, } },
2989 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, 2990 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
2990 2991 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2991 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2992 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2992}; 2993};
2993 2994
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
3089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 }, 3090 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
3090 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815, 3091 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
3091 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, 3092 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
3093 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3094 0xA000, 0x2000, 0, 0, netmos_9901 },
3092 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ 3095 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
3093 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, 3096 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
3094 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 3097 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e53eacd75c8d..53075424a434 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,7 +39,6 @@
39#include <linux/sysdev.h> 39#include <linux/sysdev.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/e820.h>
43#include "pci.h" 42#include "pci.h"
44 43
45#define ROOT_SIZE VTD_PAGE_SIZE 44#define ROOT_SIZE VTD_PAGE_SIZE
@@ -57,14 +56,32 @@
57#define MAX_AGAW_WIDTH 64 56#define MAX_AGAW_WIDTH 64
58 57
59#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 58#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
60 60
61#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 61#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 62#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
63#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 63#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
64 64
65#ifndef PHYSICAL_PAGE_MASK 65
66#define PHYSICAL_PAGE_MASK PAGE_MASK 66/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
67#endif 67 are never going to work. */
68static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
69{
70 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
71}
72
73static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
74{
75 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
76}
77static inline unsigned long page_to_dma_pfn(struct page *pg)
78{
79 return mm_to_dma_pfn(page_to_pfn(pg));
80}
81static inline unsigned long virt_to_dma_pfn(void *p)
82{
83 return page_to_dma_pfn(virt_to_page(p));
84}
68 85
69/* global iommu list, set NULL for ignored DMAR units */ 86/* global iommu list, set NULL for ignored DMAR units */
70static struct intel_iommu **g_iommus; 87static struct intel_iommu **g_iommus;
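Note: the helpers added above convert between the MM's page frame numbers (PAGE_SHIFT, which may be larger than 4KiB on some architectures) and VT-d's fixed 4KiB frame numbers (VTD_PAGE_SHIFT). A small worked example, assuming a 64KiB-page kernel (PAGE_SHIFT 16, VTD_PAGE_SHIFT 12):

unsigned long mm_pfn  = 0x1234;                 /* one 64KiB MM page                       */
unsigned long dma_pfn = mm_to_dma_pfn(mm_pfn);  /* == 0x12340: 16 VT-d pages per MM page   */

BUG_ON(dma_to_mm_pfn(dma_pfn) != mm_pfn);       /* shifting back round-trips exactly       */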
@@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
205 222
206static inline u64 dma_pte_addr(struct dma_pte *pte) 223static inline u64 dma_pte_addr(struct dma_pte *pte)
207{ 224{
208 return (pte->val & VTD_PAGE_MASK); 225#ifdef CONFIG_64BIT
226 return pte->val & VTD_PAGE_MASK;
227#else
228 /* Must have a full atomic 64-bit read */
229 return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
230#endif
209} 231}
210 232
211static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) 233static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
212{ 234{
213 pte->val |= (addr & VTD_PAGE_MASK); 235 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
214} 236}
215 237
216static inline bool dma_pte_present(struct dma_pte *pte) 238static inline bool dma_pte_present(struct dma_pte *pte)
@@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
218 return (pte->val & 3) != 0; 240 return (pte->val & 3) != 0;
219} 241}
220 242
243static inline int first_pte_in_page(struct dma_pte *pte)
244{
245 return !((unsigned long)pte & ~VTD_PAGE_MASK);
246}
247
221/* 248/*
222 * This domain is a statically identity mapping domain. 249 * This domain is a statically identity mapping domain.
223 * 1. This domain creates a static 1:1 mapping to all usable memory. 250 * 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -245,7 +272,6 @@ struct dmar_domain {
245 struct iova_domain iovad; /* iova's that belong to this domain */ 272 struct iova_domain iovad; /* iova's that belong to this domain */
246 273
247 struct dma_pte *pgd; /* virtual address */ 274 struct dma_pte *pgd; /* virtual address */
248 spinlock_t mapping_lock; /* page table lock */
249 int gaw; /* max guest address width */ 275 int gaw; /* max guest address width */
250 276
251 /* adjusted guest address width, 0 is level 2 30-bit */ 277 /* adjusted guest address width, 0 is level 2 30-bit */
@@ -649,80 +675,78 @@ static inline int width_to_agaw(int width)
649 675
650static inline unsigned int level_to_offset_bits(int level) 676static inline unsigned int level_to_offset_bits(int level)
651{ 677{
652 return (12 + (level - 1) * LEVEL_STRIDE); 678 return (level - 1) * LEVEL_STRIDE;
653} 679}
654 680
655static inline int address_level_offset(u64 addr, int level) 681static inline int pfn_level_offset(unsigned long pfn, int level)
656{ 682{
657 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK); 683 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
658} 684}
659 685
660static inline u64 level_mask(int level) 686static inline unsigned long level_mask(int level)
661{ 687{
662 return ((u64)-1 << level_to_offset_bits(level)); 688 return -1UL << level_to_offset_bits(level);
663} 689}
664 690
665static inline u64 level_size(int level) 691static inline unsigned long level_size(int level)
666{ 692{
667 return ((u64)1 << level_to_offset_bits(level)); 693 return 1UL << level_to_offset_bits(level);
668} 694}
669 695
670static inline u64 align_to_level(u64 addr, int level) 696static inline unsigned long align_to_level(unsigned long pfn, int level)
671{ 697{
672 return ((addr + level_size(level) - 1) & level_mask(level)); 698 return (pfn + level_size(level) - 1) & level_mask(level);
673} 699}
674 700
675static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) 701static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
702 unsigned long pfn)
676{ 703{
677 int addr_width = agaw_to_width(domain->agaw); 704 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
678 struct dma_pte *parent, *pte = NULL; 705 struct dma_pte *parent, *pte = NULL;
679 int level = agaw_to_level(domain->agaw); 706 int level = agaw_to_level(domain->agaw);
680 int offset; 707 int offset;
681 unsigned long flags;
682 708
683 BUG_ON(!domain->pgd); 709 BUG_ON(!domain->pgd);
684 710 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
685 addr &= (((u64)1) << addr_width) - 1;
686 parent = domain->pgd; 711 parent = domain->pgd;
687 712
688 spin_lock_irqsave(&domain->mapping_lock, flags);
689 while (level > 0) { 713 while (level > 0) {
690 void *tmp_page; 714 void *tmp_page;
691 715
692 offset = address_level_offset(addr, level); 716 offset = pfn_level_offset(pfn, level);
693 pte = &parent[offset]; 717 pte = &parent[offset];
694 if (level == 1) 718 if (level == 1)
695 break; 719 break;
696 720
697 if (!dma_pte_present(pte)) { 721 if (!dma_pte_present(pte)) {
722 uint64_t pteval;
723
698 tmp_page = alloc_pgtable_page(); 724 tmp_page = alloc_pgtable_page();
699 725
700 if (!tmp_page) { 726 if (!tmp_page)
701 spin_unlock_irqrestore(&domain->mapping_lock,
702 flags);
703 return NULL; 727 return NULL;
728
729 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
730 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
731 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
732 /* Someone else set it while we were thinking; use theirs. */
733 free_pgtable_page(tmp_page);
734 } else {
735 dma_pte_addr(pte);
736 domain_flush_cache(domain, pte, sizeof(*pte));
704 } 737 }
705 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
706 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
707 /*
708 * high level table always sets r/w, last level page
709 * table control read/write
710 */
711 dma_set_pte_readable(pte);
712 dma_set_pte_writable(pte);
713 domain_flush_cache(domain, pte, sizeof(*pte));
714 } 738 }
715 parent = phys_to_virt(dma_pte_addr(pte)); 739 parent = phys_to_virt(dma_pte_addr(pte));
716 level--; 740 level--;
717 } 741 }
718 742
719 spin_unlock_irqrestore(&domain->mapping_lock, flags);
720 return pte; 743 return pte;
721} 744}
722 745
723/* return address's pte at specific level */ 746/* return address's pte at specific level */
724static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, 747static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
725 int level) 748 unsigned long pfn,
749 int level)
726{ 750{
727 struct dma_pte *parent, *pte = NULL; 751 struct dma_pte *parent, *pte = NULL;
728 int total = agaw_to_level(domain->agaw); 752 int total = agaw_to_level(domain->agaw);
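Note: pfn_to_dma_pte() above now populates missing page-table levels without the old mapping_lock. It prepares a page, publishes it with a single cmpxchg64 on the PTE, and throws its copy away if another CPU won the race. The core of that idiom, condensed from the hunk (all identifiers are the driver's own):

if (!dma_pte_present(pte)) {
        void *tmp_page = alloc_pgtable_page();
        uint64_t pteval;

        if (!tmp_page)
                return NULL;

        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
        pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT)
                 | DMA_PTE_READ | DMA_PTE_WRITE;

        if (cmpxchg64(&pte->val, 0ULL, pteval))
                free_pgtable_page(tmp_page);    /* lost the race: theirs is already live */
        else
                domain_flush_cache(domain, pte, sizeof(*pte));
}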
@@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
730 754
731 parent = domain->pgd; 755 parent = domain->pgd;
732 while (level <= total) { 756 while (level <= total) {
733 offset = address_level_offset(addr, total); 757 offset = pfn_level_offset(pfn, total);
734 pte = &parent[offset]; 758 pte = &parent[offset];
735 if (level == total) 759 if (level == total)
736 return pte; 760 return pte;
@@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
743 return NULL; 767 return NULL;
744} 768}
745 769
746/* clear one page's page table */
747static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
748{
749 struct dma_pte *pte = NULL;
750
751 /* get last level pte */
752 pte = dma_addr_level_pte(domain, addr, 1);
753
754 if (pte) {
755 dma_clear_pte(pte);
756 domain_flush_cache(domain, pte, sizeof(*pte));
757 }
758}
759
760/* clear last level pte, a tlb flush should be followed */ 770/* clear last level pte, a tlb flush should be followed */
761static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) 771static void dma_pte_clear_range(struct dmar_domain *domain,
772 unsigned long start_pfn,
773 unsigned long last_pfn)
762{ 774{
763 int addr_width = agaw_to_width(domain->agaw); 775 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
764 int npages; 776 struct dma_pte *first_pte, *pte;
777
778 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
779 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
765 780
766 start &= (((u64)1) << addr_width) - 1; 781 /* we don't need lock here; nobody else touches the iova range */
767 end &= (((u64)1) << addr_width) - 1; 782 while (start_pfn <= last_pfn) {
768 /* in case it's partial page */ 783 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
769 start &= PAGE_MASK; 784 if (!pte) {
770 end = PAGE_ALIGN(end); 785 start_pfn = align_to_level(start_pfn + 1, 2);
771 npages = (end - start) / VTD_PAGE_SIZE; 786 continue;
787 }
788 do {
789 dma_clear_pte(pte);
790 start_pfn++;
791 pte++;
792 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
772 793
773 /* we don't need lock here, nobody else touches the iova range */ 794 domain_flush_cache(domain, first_pte,
774 while (npages--) { 795 (void *)pte - (void *)first_pte);
775 dma_pte_clear_one(domain, start);
776 start += VTD_PAGE_SIZE;
777 } 796 }
778} 797}
779 798
780/* free page table pages. last level pte should already be cleared */ 799/* free page table pages. last level pte should already be cleared */
781static void dma_pte_free_pagetable(struct dmar_domain *domain, 800static void dma_pte_free_pagetable(struct dmar_domain *domain,
782 u64 start, u64 end) 801 unsigned long start_pfn,
802 unsigned long last_pfn)
783{ 803{
784 int addr_width = agaw_to_width(domain->agaw); 804 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
785 struct dma_pte *pte; 805 struct dma_pte *first_pte, *pte;
786 int total = agaw_to_level(domain->agaw); 806 int total = agaw_to_level(domain->agaw);
787 int level; 807 int level;
788 u64 tmp; 808 unsigned long tmp;
789 809
790 start &= (((u64)1) << addr_width) - 1; 810 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
791 end &= (((u64)1) << addr_width) - 1; 811 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
792 812
793 /* we don't need lock here, nobody else touches the iova range */ 813 /* We don't need lock here; nobody else touches the iova range */
794 level = 2; 814 level = 2;
795 while (level <= total) { 815 while (level <= total) {
796 tmp = align_to_level(start, level); 816 tmp = align_to_level(start_pfn, level);
797 if (tmp >= end || (tmp + level_size(level) > end)) 817
818 /* If we can't even clear one PTE at this level, we're done */
819 if (tmp + level_size(level) - 1 > last_pfn)
798 return; 820 return;
799 821
800 while (tmp < end) { 822 while (tmp + level_size(level) - 1 <= last_pfn) {
801 pte = dma_addr_level_pte(domain, tmp, level); 823 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
802 if (pte) { 824 if (!pte) {
803 free_pgtable_page( 825 tmp = align_to_level(tmp + 1, level + 1);
804 phys_to_virt(dma_pte_addr(pte))); 826 continue;
805 dma_clear_pte(pte);
806 domain_flush_cache(domain, pte, sizeof(*pte));
807 } 827 }
808 tmp += level_size(level); 828 do {
829 if (dma_pte_present(pte)) {
830 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
831 dma_clear_pte(pte);
832 }
833 pte++;
834 tmp += level_size(level);
835 } while (!first_pte_in_page(pte) &&
836 tmp + level_size(level) - 1 <= last_pfn);
837
838 domain_flush_cache(domain, first_pte,
839 (void *)pte - (void *)first_pte);
840
809 } 841 }
810 level++; 842 level++;
811 } 843 }
812 /* free pgd */ 844 /* free pgd */
813 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) { 845 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
814 free_pgtable_page(domain->pgd); 846 free_pgtable_page(domain->pgd);
815 domain->pgd = NULL; 847 domain->pgd = NULL;
816 } 848 }
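Note: both rewritten functions above use the same batching idiom: remember the first PTE of a contiguous run, clear entries until the run leaves the current page-table page (first_pte_in_page()), then flush the whole run with one domain_flush_cache() call instead of one flush per entry. Extracted for clarity from the clear-range hunk:

first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
do {
        dma_clear_pte(pte);
        start_pfn++;
        pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

/* one cache flush covering every PTE touched in this run */
domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte);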
@@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1036} 1068}
1037 1069
1038static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 1070static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1039 u64 addr, unsigned int pages) 1071 unsigned long pfn, unsigned int pages)
1040{ 1072{
1041 unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1073 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1074 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1042 1075
1043 BUG_ON(addr & (~VTD_PAGE_MASK));
1044 BUG_ON(pages == 0); 1076 BUG_ON(pages == 0);
1045 1077
1046 /* 1078 /*
@@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1055 else 1087 else
1056 iommu->flush.flush_iotlb(iommu, did, addr, mask, 1088 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1057 DMA_TLB_PSI_FLUSH); 1089 DMA_TLB_PSI_FLUSH);
1058 if (did) 1090
1091 /*
1092 * In caching mode, domain ID 0 is reserved for non-present to present
1093 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1094 */
1095 if (!cap_caching_mode(iommu->cap) || did)
1059 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); 1096 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1060} 1097}
1061 1098
@@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void)
1280 struct pci_dev *pdev = NULL; 1317 struct pci_dev *pdev = NULL;
1281 struct iova *iova; 1318 struct iova *iova;
1282 int i; 1319 int i;
1283 u64 addr, size;
1284 1320
1285 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); 1321 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1286 1322
@@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void)
1303 r = &pdev->resource[i]; 1339 r = &pdev->resource[i];
1304 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1340 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1305 continue; 1341 continue;
1306 addr = r->start; 1342 iova = reserve_iova(&reserved_iova_list,
1307 addr &= PHYSICAL_PAGE_MASK; 1343 IOVA_PFN(r->start),
1308 size = r->end - addr; 1344 IOVA_PFN(r->end));
1309 size = PAGE_ALIGN(size);
1310 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1311 IOVA_PFN(size + addr) - 1);
1312 if (!iova) 1345 if (!iova)
1313 printk(KERN_ERR "Reserve iova failed\n"); 1346 printk(KERN_ERR "Reserve iova failed\n");
1314 } 1347 }
@@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1342 unsigned long sagaw; 1375 unsigned long sagaw;
1343 1376
1344 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 1377 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1345 spin_lock_init(&domain->mapping_lock);
1346 spin_lock_init(&domain->iommu_lock); 1378 spin_lock_init(&domain->iommu_lock);
1347 1379
1348 domain_reserve_special_ranges(domain); 1380 domain_reserve_special_ranges(domain);
@@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain)
1389{ 1421{
1390 struct dmar_drhd_unit *drhd; 1422 struct dmar_drhd_unit *drhd;
1391 struct intel_iommu *iommu; 1423 struct intel_iommu *iommu;
1392 u64 end;
1393 1424
1394 /* Domain 0 is reserved, so dont process it */ 1425 /* Domain 0 is reserved, so dont process it */
1395 if (!domain) 1426 if (!domain)
@@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain)
1398 domain_remove_dev_info(domain); 1429 domain_remove_dev_info(domain);
1399 /* destroy iovas */ 1430 /* destroy iovas */
1400 put_iova_domain(&domain->iovad); 1431 put_iova_domain(&domain->iovad);
1401 end = DOMAIN_MAX_ADDR(domain->gaw);
1402 end = end & (~PAGE_MASK);
1403 1432
1404 /* clear ptes */ 1433 /* clear ptes */
1405 dma_pte_clear_range(domain, 0, end); 1434 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1406 1435
1407 /* free page tables */ 1436 /* free page tables */
1408 dma_pte_free_pagetable(domain, 0, end); 1437 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1409 1438
1410 for_each_active_iommu(iommu, drhd) 1439 for_each_active_iommu(iommu, drhd)
1411 if (test_bit(iommu->seq_id, &domain->iommu_bmp)) 1440 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
@@ -1619,42 +1648,86 @@ static int domain_context_mapped(struct pci_dev *pdev)
1619 tmp->devfn); 1648 tmp->devfn);
1620} 1649}
1621 1650
1622static int 1651static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1623domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, 1652 struct scatterlist *sg, unsigned long phys_pfn,
1624 u64 hpa, size_t size, int prot) 1653 unsigned long nr_pages, int prot)
1625{ 1654{
1626 u64 start_pfn, end_pfn; 1655 struct dma_pte *first_pte = NULL, *pte = NULL;
1627 struct dma_pte *pte; 1656 phys_addr_t uninitialized_var(pteval);
1628 int index; 1657 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1629 int addr_width = agaw_to_width(domain->agaw); 1658 unsigned long sg_res;
1630 1659
1631 hpa &= (((u64)1) << addr_width) - 1; 1660 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1632 1661
1633 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) 1662 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1634 return -EINVAL; 1663 return -EINVAL;
1635 iova &= PAGE_MASK; 1664
1636 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; 1665 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1637 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; 1666
1638 index = 0; 1667 if (sg)
1639 while (start_pfn < end_pfn) { 1668 sg_res = 0;
1640 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); 1669 else {
1641 if (!pte) 1670 sg_res = nr_pages + 1;
1642 return -ENOMEM; 1671 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1672 }
1673
1674 while (nr_pages--) {
1675 uint64_t tmp;
1676
1677 if (!sg_res) {
1678 sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
1679 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1680 sg->dma_length = sg->length;
1681 pteval = page_to_phys(sg_page(sg)) | prot;
1682 }
1683 if (!pte) {
1684 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1685 if (!pte)
1686 return -ENOMEM;
1687 }
1643 /* We don't need lock here, nobody else 1688 /* We don't need lock here, nobody else
1644 * touches the iova range 1689 * touches the iova range
1645 */ 1690 */
1646 BUG_ON(dma_pte_addr(pte)); 1691 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1647 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); 1692 if (tmp) {
1648 dma_set_pte_prot(pte, prot); 1693 static int dumps = 5;
1649 if (prot & DMA_PTE_SNP) 1694 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1650 dma_set_pte_snp(pte); 1695 iov_pfn, tmp, (unsigned long long)pteval);
1651 domain_flush_cache(domain, pte, sizeof(*pte)); 1696 if (dumps) {
1652 start_pfn++; 1697 dumps--;
1653 index++; 1698 debug_dma_dump_mappings(NULL);
1699 }
1700 WARN_ON(1);
1701 }
1702 pte++;
1703 if (!nr_pages || first_pte_in_page(pte)) {
1704 domain_flush_cache(domain, first_pte,
1705 (void *)pte - (void *)first_pte);
1706 pte = NULL;
1707 }
1708 iov_pfn++;
1709 pteval += VTD_PAGE_SIZE;
1710 sg_res--;
1711 if (!sg_res)
1712 sg = sg_next(sg);
1654 } 1713 }
1655 return 0; 1714 return 0;
1656} 1715}
1657 1716
1717static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1718 struct scatterlist *sg, unsigned long nr_pages,
1719 int prot)
1720{
1721 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1722}
1723
1724static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1725 unsigned long phys_pfn, unsigned long nr_pages,
1726 int prot)
1727{
1728 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1729}
1730
1658static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) 1731static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1659{ 1732{
1660 if (!iommu) 1733 if (!iommu)
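Note: __domain_mapping() above replaces the byte-address domain_page_mapping() with a pfn-based core that accepts either a scatterlist or a flat physical range, and the two inline wrappers select which. An illustrative caller, mirroring how iommu_domain_identity_map() later in this patch installs a 1:1 range:

/* map [first_vpfn, last_vpfn] onto itself, read+write */
ret = domain_pfn_mapping(domain, first_vpfn, first_vpfn,
                         last_vpfn - first_vpfn + 1,
                         DMA_PTE_READ | DMA_PTE_WRITE);
if (ret)
        return ret;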
@@ -1845,58 +1918,61 @@ error:
1845 1918
1846static int iommu_identity_mapping; 1919static int iommu_identity_mapping;
1847 1920
1921static int iommu_domain_identity_map(struct dmar_domain *domain,
1922 unsigned long long start,
1923 unsigned long long end)
1924{
1925 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1926 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1927
1928 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1929 dma_to_mm_pfn(last_vpfn))) {
1930 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1931 return -ENOMEM;
1932 }
1933
1934 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1935 start, end, domain->id);
1936 /*
1937 * RMRR range might have overlap with physical memory range,
1938 * clear it first
1939 */
1940 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1941
1942 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1943 last_vpfn - first_vpfn + 1,
1944 DMA_PTE_READ|DMA_PTE_WRITE);
1945}
1946
1848static int iommu_prepare_identity_map(struct pci_dev *pdev, 1947static int iommu_prepare_identity_map(struct pci_dev *pdev,
1849 unsigned long long start, 1948 unsigned long long start,
1850 unsigned long long end) 1949 unsigned long long end)
1851{ 1950{
1852 struct dmar_domain *domain; 1951 struct dmar_domain *domain;
1853 unsigned long size;
1854 unsigned long long base;
1855 int ret; 1952 int ret;
1856 1953
1857 printk(KERN_INFO 1954 printk(KERN_INFO
1858 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1955 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1859 pci_name(pdev), start, end); 1956 pci_name(pdev), start, end);
1860 if (iommu_identity_mapping) 1957
1861 domain = si_domain; 1958 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1862 else
1863 /* page table init */
1864 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1865 if (!domain) 1959 if (!domain)
1866 return -ENOMEM; 1960 return -ENOMEM;
1867 1961
1868 /* The address might not be aligned */ 1962 ret = iommu_domain_identity_map(domain, start, end);
1869 base = start & PAGE_MASK;
1870 size = end - base;
1871 size = PAGE_ALIGN(size);
1872 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1873 IOVA_PFN(base + size) - 1)) {
1874 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1875 ret = -ENOMEM;
1876 goto error;
1877 }
1878
1879 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1880 size, base, pci_name(pdev));
1881 /*
1882 * RMRR range might have overlap with physical memory range,
1883 * clear it first
1884 */
1885 dma_pte_clear_range(domain, base, base + size);
1886
1887 ret = domain_page_mapping(domain, base, base, size,
1888 DMA_PTE_READ|DMA_PTE_WRITE);
1889 if (ret) 1963 if (ret)
1890 goto error; 1964 goto error;
1891 1965
1892 /* context entry init */ 1966 /* context entry init */
1893 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); 1967 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1894 if (!ret) 1968 if (ret)
1895 return 0; 1969 goto error;
1896error: 1970
1971 return 0;
1972
1973 error:
1897 domain_exit(domain); 1974 domain_exit(domain);
1898 return ret; 1975 return ret;
1899
1900} 1976}
1901 1977
1902static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, 1978static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
@@ -1908,64 +1984,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1908 rmrr->end_address + 1); 1984 rmrr->end_address + 1);
1909} 1985}
1910 1986
1911#ifdef CONFIG_DMAR_GFX_WA
1912struct iommu_prepare_data {
1913 struct pci_dev *pdev;
1914 int ret;
1915};
1916
1917static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1918 unsigned long end_pfn, void *datax)
1919{
1920 struct iommu_prepare_data *data;
1921
1922 data = (struct iommu_prepare_data *)datax;
1923
1924 data->ret = iommu_prepare_identity_map(data->pdev,
1925 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1926 return data->ret;
1927
1928}
1929
1930static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1931{
1932 int nid;
1933 struct iommu_prepare_data data;
1934
1935 data.pdev = pdev;
1936 data.ret = 0;
1937
1938 for_each_online_node(nid) {
1939 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1940 if (data.ret)
1941 return data.ret;
1942 }
1943 return data.ret;
1944}
1945
1946static void __init iommu_prepare_gfx_mapping(void)
1947{
1948 struct pci_dev *pdev = NULL;
1949 int ret;
1950
1951 for_each_pci_dev(pdev) {
1952 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
1953 !IS_GFX_DEVICE(pdev))
1954 continue;
1955 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1956 pci_name(pdev));
1957 ret = iommu_prepare_with_active_regions(pdev);
1958 if (ret)
1959 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1960 }
1961}
1962#else /* !CONFIG_DMAR_GFX_WA */
1963static inline void iommu_prepare_gfx_mapping(void)
1964{
1965 return;
1966}
1967#endif
1968
1969#ifdef CONFIG_DMAR_FLOPPY_WA 1987#ifdef CONFIG_DMAR_FLOPPY_WA
1970static inline void iommu_prepare_isa(void) 1988static inline void iommu_prepare_isa(void)
1971{ 1989{
@@ -1976,12 +1994,12 @@ static inline void iommu_prepare_isa(void)
1976 if (!pdev) 1994 if (!pdev)
1977 return; 1995 return;
1978 1996
1979 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); 1997 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
1980 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 1998 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1981 1999
1982 if (ret) 2000 if (ret)
1983 printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " 2001 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1984 "floppy might not work\n"); 2002 "floppy might not work\n");
1985 2003
1986} 2004}
1987#else 2005#else
@@ -2009,16 +2027,30 @@ static int __init init_context_pass_through(void)
2009} 2027}
2010 2028
2011static int md_domain_init(struct dmar_domain *domain, int guest_width); 2029static int md_domain_init(struct dmar_domain *domain, int guest_width);
2030
2031static int __init si_domain_work_fn(unsigned long start_pfn,
2032 unsigned long end_pfn, void *datax)
2033{
2034 int *ret = datax;
2035
2036 *ret = iommu_domain_identity_map(si_domain,
2037 (uint64_t)start_pfn << PAGE_SHIFT,
2038 (uint64_t)end_pfn << PAGE_SHIFT);
2039 return *ret;
2040
2041}
2042
2012static int si_domain_init(void) 2043static int si_domain_init(void)
2013{ 2044{
2014 struct dmar_drhd_unit *drhd; 2045 struct dmar_drhd_unit *drhd;
2015 struct intel_iommu *iommu; 2046 struct intel_iommu *iommu;
2016 int ret = 0; 2047 int nid, ret = 0;
2017 2048
2018 si_domain = alloc_domain(); 2049 si_domain = alloc_domain();
2019 if (!si_domain) 2050 if (!si_domain)
2020 return -EFAULT; 2051 return -EFAULT;
2021 2052
2053 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2022 2054
2023 for_each_active_iommu(iommu, drhd) { 2055 for_each_active_iommu(iommu, drhd) {
2024 ret = iommu_attach_domain(si_domain, iommu); 2056 ret = iommu_attach_domain(si_domain, iommu);
@@ -2035,6 +2067,12 @@ static int si_domain_init(void)
2035 2067
2036 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2068 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2037 2069
2070 for_each_online_node(nid) {
2071 work_with_active_regions(nid, si_domain_work_fn, &ret);
2072 if (ret)
2073 return ret;
2074 }
2075
2038 return 0; 2076 return 0;
2039} 2077}
2040 2078
@@ -2081,7 +2119,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2081 2119
2082static int iommu_prepare_static_identity_mapping(void) 2120static int iommu_prepare_static_identity_mapping(void)
2083{ 2121{
2084 int i;
2085 struct pci_dev *pdev = NULL; 2122 struct pci_dev *pdev = NULL;
2086 int ret; 2123 int ret;
2087 2124
@@ -2089,20 +2126,14 @@ static int iommu_prepare_static_identity_mapping(void)
2089 if (ret) 2126 if (ret)
2090 return -EFAULT; 2127 return -EFAULT;
2091 2128
2092 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2093 for_each_pci_dev(pdev) { 2129 for_each_pci_dev(pdev) {
2094 for (i = 0; i < e820.nr_map; i++) { 2130 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2095 struct e820entry *ei = &e820.map[i]; 2131 pci_name(pdev));
2096 2132
2097 if (ei->type == E820_RAM) { 2133 ret = domain_context_mapping(si_domain, pdev,
2098 ret = iommu_prepare_identity_map(pdev, 2134 CONTEXT_TT_MULTI_LEVEL);
2099 ei->addr, ei->addr + ei->size); 2135 if (ret)
2100 if (ret) { 2136 return ret;
2101 printk(KERN_INFO "1:1 mapping to one domain failed.\n");
2102 return -EFAULT;
2103 }
2104 }
2105 }
2106 ret = domain_add_dev_info(si_domain, pdev); 2137 ret = domain_add_dev_info(si_domain, pdev);
2107 if (ret) 2138 if (ret)
2108 return ret; 2139 return ret;
@@ -2293,8 +2324,6 @@ int __init init_dmars(void)
2293 } 2324 }
2294 } 2325 }
2295 2326
2296 iommu_prepare_gfx_mapping();
2297
2298 iommu_prepare_isa(); 2327 iommu_prepare_isa();
2299 } 2328 }
2300 2329
@@ -2339,50 +2368,40 @@ error:
2339 return ret; 2368 return ret;
2340} 2369}
2341 2370
2342static inline u64 aligned_size(u64 host_addr, size_t size) 2371static inline unsigned long aligned_nrpages(unsigned long host_addr,
2343{ 2372 size_t size)
2344 u64 addr;
2345 addr = (host_addr & (~PAGE_MASK)) + size;
2346 return PAGE_ALIGN(addr);
2347}
2348
2349struct iova *
2350iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
2351{ 2373{
2352 struct iova *piova; 2374 host_addr &= ~PAGE_MASK;
2375 host_addr += size + PAGE_SIZE - 1;
2353 2376
2354 /* Make sure it's in range */ 2377 return host_addr >> VTD_PAGE_SHIFT;
2355 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
2356 if (!size || (IOVA_START_ADDR + size > end))
2357 return NULL;
2358
2359 piova = alloc_iova(&domain->iovad,
2360 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
2361 return piova;
2362} 2378}
2363 2379
2364static struct iova * 2380static struct iova *intel_alloc_iova(struct device *dev,
2365__intel_alloc_iova(struct device *dev, struct dmar_domain *domain, 2381 struct dmar_domain *domain,
2366 size_t size, u64 dma_mask) 2382 unsigned long nrpages, uint64_t dma_mask)
2367{ 2383{
2368 struct pci_dev *pdev = to_pci_dev(dev); 2384 struct pci_dev *pdev = to_pci_dev(dev);
2369 struct iova *iova = NULL; 2385 struct iova *iova = NULL;
2370 2386
2371 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac) 2387 /* Restrict dma_mask to the width that the iommu can handle */
2372 iova = iommu_alloc_iova(domain, size, dma_mask); 2388 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2373 else { 2389
2390 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2374 /* 2391 /*
2375 * First try to allocate an io virtual address in 2392 * First try to allocate an io virtual address in
2376 * DMA_BIT_MASK(32) and if that fails then try allocating 2393 * DMA_BIT_MASK(32) and if that fails then try allocating
2377 * from higher range 2394 * from higher range
2378 */ 2395 */
2379 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32)); 2396 iova = alloc_iova(&domain->iovad, nrpages,
2380 if (!iova) 2397 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2381 iova = iommu_alloc_iova(domain, size, dma_mask); 2398 if (iova)
2382 } 2399 return iova;
2383 2400 }
2384 if (!iova) { 2401 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2385 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev)); 2402 if (unlikely(!iova)) {
2403 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2404 nrpages, pci_name(pdev));
2386 return NULL; 2405 return NULL;
2387 } 2406 }
2388 2407
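Note: aligned_nrpages() replaces aligned_size() and returns a page count rather than a byte count, which is what the new pfn-based allocator and mapping helpers expect. The sketch below mirrors its arithmetic in a standalone program, assuming PAGE_SIZE and the VT-d page size are both 4 KiB (in the driver the two shifts can differ):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT     12
    #define PAGE_SIZE      (1UL << PAGE_SHIFT)
    #define PAGE_MASK      (~(PAGE_SIZE - 1))
    #define VTD_PAGE_SHIFT 12   /* sketch assumption: same as PAGE_SHIFT */

    /* Number of pages touched by a buffer that starts at host_addr and is
     * size bytes long -- mirrors the helper added in this hunk. */
    static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
    {
            host_addr &= ~PAGE_MASK;            /* keep only the in-page offset */
            host_addr += size + PAGE_SIZE - 1;  /* round the end up to a page   */
            return host_addr >> VTD_PAGE_SHIFT;
    }

    int main(void)
    {
            /* 1 byte: 1 page.  8 KiB starting 16 bytes into a page: 3 pages. */
            printf("%lu %lu\n", aligned_nrpages(0x1000, 1),
                   aligned_nrpages(0x1010, 8192));
            return 0;
    }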
@@ -2485,14 +2504,12 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2485 return 0; 2504 return 0;
2486 2505
2487 iommu = domain_get_iommu(domain); 2506 iommu = domain_get_iommu(domain);
2488 size = aligned_size((u64)paddr, size); 2507 size = aligned_nrpages(paddr, size);
2489 2508
2490 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); 2509 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2491 if (!iova) 2510 if (!iova)
2492 goto error; 2511 goto error;
2493 2512
2494 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2495
2496 /* 2513 /*
2497 * Check if DMAR supports zero-length reads on write only 2514 * Check if DMAR supports zero-length reads on write only
2498 * mappings.. 2515 * mappings..
@@ -2508,20 +2525,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2508 * might have two guest_addr mapping to the same host paddr, but this 2525 * might have two guest_addr mapping to the same host paddr, but this
2509 * is not a big problem 2526 * is not a big problem
2510 */ 2527 */
2511 ret = domain_page_mapping(domain, start_paddr, 2528 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2512 ((u64)paddr) & PHYSICAL_PAGE_MASK, 2529 paddr >> VTD_PAGE_SHIFT, size, prot);
2513 size, prot);
2514 if (ret) 2530 if (ret)
2515 goto error; 2531 goto error;
2516 2532
2517 /* it's a non-present to present mapping. Only flush if caching mode */ 2533 /* it's a non-present to present mapping. Only flush if caching mode */
2518 if (cap_caching_mode(iommu->cap)) 2534 if (cap_caching_mode(iommu->cap))
2519 iommu_flush_iotlb_psi(iommu, 0, start_paddr, 2535 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2520 size >> VTD_PAGE_SHIFT);
2521 else 2536 else
2522 iommu_flush_write_buffer(iommu); 2537 iommu_flush_write_buffer(iommu);
2523 2538
2524 return start_paddr + ((u64)paddr & (~PAGE_MASK)); 2539 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2540 start_paddr += paddr & ~PAGE_MASK;
2541 return start_paddr;
2525 2542
2526error: 2543error:
2527 if (iova) 2544 if (iova)
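Note: the single-page map path now works in page frame numbers end to end: the IOVA is allocated in pages, paddr is mapped as paddr >> VTD_PAGE_SHIFT, and only at the very end is iova->pfn_lo turned back into a DMA address by re-adding the in-page offset. A sketch of that final reconstruction with sample values; plain arithmetic, not the driver itself:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t paddr  = 0x12345678;  /* physical address being mapped     */
            uint64_t pfn_lo = 0x80000;     /* first page of the allocated IOVA  */

            /* DMA address = start of the IOVA range + offset within the page,
             * exactly what the patch computes after the mapping succeeds. */
            uint64_t dma_addr = (pfn_lo << PAGE_SHIFT) + (paddr & ~PAGE_MASK);

            printf("dma_addr = 0x%llx\n", (unsigned long long)dma_addr);
            return 0;
    }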
@@ -2614,7 +2631,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2614{ 2631{
2615 struct pci_dev *pdev = to_pci_dev(dev); 2632 struct pci_dev *pdev = to_pci_dev(dev);
2616 struct dmar_domain *domain; 2633 struct dmar_domain *domain;
2617 unsigned long start_addr; 2634 unsigned long start_pfn, last_pfn;
2618 struct iova *iova; 2635 struct iova *iova;
2619 struct intel_iommu *iommu; 2636 struct intel_iommu *iommu;
2620 2637
@@ -2627,22 +2644,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2627 iommu = domain_get_iommu(domain); 2644 iommu = domain_get_iommu(domain);
2628 2645
2629 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); 2646 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2630 if (!iova) 2647 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2648 (unsigned long long)dev_addr))
2631 return; 2649 return;
2632 2650
2633 start_addr = iova->pfn_lo << PAGE_SHIFT; 2651 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2634 size = aligned_size((u64)dev_addr, size); 2652 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2635 2653
2636 pr_debug("Device %s unmapping: %zx@%llx\n", 2654 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2637 pci_name(pdev), size, (unsigned long long)start_addr); 2655 pci_name(pdev), start_pfn, last_pfn);
2638 2656
2639 /* clear the whole page */ 2657 /* clear the whole page */
2640 dma_pte_clear_range(domain, start_addr, start_addr + size); 2658 dma_pte_clear_range(domain, start_pfn, last_pfn);
2659
2641 /* free page tables */ 2660 /* free page tables */
2642 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2661 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2662
2643 if (intel_iommu_strict) { 2663 if (intel_iommu_strict) {
2644 iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2664 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2645 size >> VTD_PAGE_SHIFT); 2665 last_pfn - start_pfn + 1);
2646 /* free iova */ 2666 /* free iova */
2647 __free_iova(&domain->iovad, iova); 2667 __free_iova(&domain->iovad, iova);
2648 } else { 2668 } else {
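Note: intel_unmap_page() now derives the range to tear down from the IOVA node itself (pfn_lo..pfn_hi) instead of recomputing a byte size from the caller's arguments, and WARN_ONCE flags drivers that unmap an address they never mapped. A compact sketch of that pfn bookkeeping; mm_to_dma_pfn() is assumed to be the identity here, as it is when PAGE_SIZE equals the VT-d page size:

    #include <stdio.h>

    /* Sketch assumption: 4 KiB pages, so mm pfns and VT-d dma pfns coincide.
     * On other configurations the helper rescales between the two sizes. */
    static unsigned long mm_to_dma_pfn(unsigned long mm_pfn) { return mm_pfn; }

    int main(void)
    {
            unsigned long pfn_lo = 0x80000, pfn_hi = 0x80003; /* 4-page IOVA */

            unsigned long start_pfn = mm_to_dma_pfn(pfn_lo);
            unsigned long last_pfn  = mm_to_dma_pfn(pfn_hi + 1) - 1;

            printf("clear+free pfns %lx-%lx (%lu pages)\n",
                   start_pfn, last_pfn, last_pfn - start_pfn + 1);
            return 0;
    }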
@@ -2700,14 +2720,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2700 int nelems, enum dma_data_direction dir, 2720 int nelems, enum dma_data_direction dir,
2701 struct dma_attrs *attrs) 2721 struct dma_attrs *attrs)
2702{ 2722{
2703 int i;
2704 struct pci_dev *pdev = to_pci_dev(hwdev); 2723 struct pci_dev *pdev = to_pci_dev(hwdev);
2705 struct dmar_domain *domain; 2724 struct dmar_domain *domain;
2706 unsigned long start_addr; 2725 unsigned long start_pfn, last_pfn;
2707 struct iova *iova; 2726 struct iova *iova;
2708 size_t size = 0;
2709 phys_addr_t addr;
2710 struct scatterlist *sg;
2711 struct intel_iommu *iommu; 2727 struct intel_iommu *iommu;
2712 2728
2713 if (iommu_no_mapping(pdev)) 2729 if (iommu_no_mapping(pdev))
@@ -2719,22 +2735,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2719 iommu = domain_get_iommu(domain); 2735 iommu = domain_get_iommu(domain);
2720 2736
2721 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); 2737 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2722 if (!iova) 2738 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2739 (unsigned long long)sglist[0].dma_address))
2723 return; 2740 return;
2724 for_each_sg(sglist, sg, nelems, i) {
2725 addr = page_to_phys(sg_page(sg)) + sg->offset;
2726 size += aligned_size((u64)addr, sg->length);
2727 }
2728 2741
2729 start_addr = iova->pfn_lo << PAGE_SHIFT; 2742 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2743 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2730 2744
2731 /* clear the whole page */ 2745 /* clear the whole page */
2732 dma_pte_clear_range(domain, start_addr, start_addr + size); 2746 dma_pte_clear_range(domain, start_pfn, last_pfn);
2747
2733 /* free page tables */ 2748 /* free page tables */
2734 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2749 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2735 2750
2736 iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2751 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2737 size >> VTD_PAGE_SHIFT); 2752 (last_pfn - start_pfn + 1));
2738 2753
2739 /* free iova */ 2754 /* free iova */
2740 __free_iova(&domain->iovad, iova); 2755 __free_iova(&domain->iovad, iova);
@@ -2757,17 +2772,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2757static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, 2772static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2758 enum dma_data_direction dir, struct dma_attrs *attrs) 2773 enum dma_data_direction dir, struct dma_attrs *attrs)
2759{ 2774{
2760 phys_addr_t addr;
2761 int i; 2775 int i;
2762 struct pci_dev *pdev = to_pci_dev(hwdev); 2776 struct pci_dev *pdev = to_pci_dev(hwdev);
2763 struct dmar_domain *domain; 2777 struct dmar_domain *domain;
2764 size_t size = 0; 2778 size_t size = 0;
2765 int prot = 0; 2779 int prot = 0;
2766 size_t offset = 0; 2780 size_t offset_pfn = 0;
2767 struct iova *iova = NULL; 2781 struct iova *iova = NULL;
2768 int ret; 2782 int ret;
2769 struct scatterlist *sg; 2783 struct scatterlist *sg;
2770 unsigned long start_addr; 2784 unsigned long start_vpfn;
2771 struct intel_iommu *iommu; 2785 struct intel_iommu *iommu;
2772 2786
2773 BUG_ON(dir == DMA_NONE); 2787 BUG_ON(dir == DMA_NONE);
@@ -2780,12 +2794,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2780 2794
2781 iommu = domain_get_iommu(domain); 2795 iommu = domain_get_iommu(domain);
2782 2796
2783 for_each_sg(sglist, sg, nelems, i) { 2797 for_each_sg(sglist, sg, nelems, i)
2784 addr = page_to_phys(sg_page(sg)) + sg->offset; 2798 size += aligned_nrpages(sg->offset, sg->length);
2785 size += aligned_size((u64)addr, sg->length);
2786 }
2787 2799
2788 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); 2800 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2789 if (!iova) { 2801 if (!iova) {
2790 sglist->dma_length = 0; 2802 sglist->dma_length = 0;
2791 return 0; 2803 return 0;
@@ -2801,35 +2813,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2801 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 2813 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2802 prot |= DMA_PTE_WRITE; 2814 prot |= DMA_PTE_WRITE;
2803 2815
2804 start_addr = iova->pfn_lo << PAGE_SHIFT; 2816 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2805 offset = 0; 2817
2806 for_each_sg(sglist, sg, nelems, i) { 2818 ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2807 addr = page_to_phys(sg_page(sg)) + sg->offset; 2819 if (unlikely(ret)) {
2808 size = aligned_size((u64)addr, sg->length); 2820 /* clear the page */
2809 ret = domain_page_mapping(domain, start_addr + offset, 2821 dma_pte_clear_range(domain, start_vpfn,
2810 ((u64)addr) & PHYSICAL_PAGE_MASK, 2822 start_vpfn + size - 1);
2811 size, prot); 2823 /* free page tables */
2812 if (ret) { 2824 dma_pte_free_pagetable(domain, start_vpfn,
2813 /* clear the page */ 2825 start_vpfn + size - 1);
2814 dma_pte_clear_range(domain, start_addr, 2826 /* free iova */
2815 start_addr + offset); 2827 __free_iova(&domain->iovad, iova);
2816 /* free page tables */ 2828 return 0;
2817 dma_pte_free_pagetable(domain, start_addr,
2818 start_addr + offset);
2819 /* free iova */
2820 __free_iova(&domain->iovad, iova);
2821 return 0;
2822 }
2823 sg->dma_address = start_addr + offset +
2824 ((u64)addr & (~PAGE_MASK));
2825 sg->dma_length = sg->length;
2826 offset += size;
2827 } 2829 }
2828 2830
2829 /* it's a non-present to present mapping. Only flush if caching mode */ 2831 /* it's a non-present to present mapping. Only flush if caching mode */
2830 if (cap_caching_mode(iommu->cap)) 2832 if (cap_caching_mode(iommu->cap))
2831 iommu_flush_iotlb_psi(iommu, 0, start_addr, 2833 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2832 offset >> VTD_PAGE_SHIFT);
2833 else 2834 else
2834 iommu_flush_write_buffer(iommu); 2835 iommu_flush_write_buffer(iommu);
2835 2836
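Note: intel_map_sg() now sizes one IOVA allocation for the whole scatterlist by summing aligned_nrpages() over the entries, then maps the list in a single domain_sg_mapping() call instead of one domain_page_mapping() per entry. A standalone sketch of the sizing step, with a small array standing in for a scatterlist:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long aligned_nrpages(unsigned long offset, size_t len)
    {
            offset &= ~PAGE_MASK;
            offset += len + PAGE_SIZE - 1;
            return offset >> PAGE_SHIFT;
    }

    int main(void)
    {
            /* Stand-in for a scatterlist: (in-page offset, length) pairs. */
            struct { unsigned long offset; size_t length; } sg[] = {
                    { 0x000, 4096 }, { 0x200, 6000 }, { 0xff0, 32 },
            };
            unsigned long nrpages = 0;

            for (size_t i = 0; i < sizeof(sg) / sizeof(sg[0]); i++)
                    nrpages += aligned_nrpages(sg[i].offset, sg[i].length);

            /* One IOVA allocation covers the whole list. */
            printf("allocate %lu pages of IOVA space\n", nrpages);
            return 0;
    }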
@@ -3334,7 +3335,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3334 int adjust_width; 3335 int adjust_width;
3335 3336
3336 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 3337 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3337 spin_lock_init(&domain->mapping_lock);
3338 spin_lock_init(&domain->iommu_lock); 3338 spin_lock_init(&domain->iommu_lock);
3339 3339
3340 domain_reserve_special_ranges(domain); 3340 domain_reserve_special_ranges(domain);
@@ -3388,8 +3388,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
3388 3388
3389static void vm_domain_exit(struct dmar_domain *domain) 3389static void vm_domain_exit(struct dmar_domain *domain)
3390{ 3390{
3391 u64 end;
3392
3393 /* Domain 0 is reserved, so dont process it */ 3391 /* Domain 0 is reserved, so dont process it */
3394 if (!domain) 3392 if (!domain)
3395 return; 3393 return;
@@ -3397,14 +3395,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
3397 vm_domain_remove_all_dev_info(domain); 3395 vm_domain_remove_all_dev_info(domain);
3398 /* destroy iovas */ 3396 /* destroy iovas */
3399 put_iova_domain(&domain->iovad); 3397 put_iova_domain(&domain->iovad);
3400 end = DOMAIN_MAX_ADDR(domain->gaw);
3401 end = end & (~VTD_PAGE_MASK);
3402 3398
3403 /* clear ptes */ 3399 /* clear ptes */
3404 dma_pte_clear_range(domain, 0, end); 3400 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3405 3401
3406 /* free page tables */ 3402 /* free page tables */
3407 dma_pte_free_pagetable(domain, 0, end); 3403 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3408 3404
3409 iommu_free_vm_domain(domain); 3405 iommu_free_vm_domain(domain);
3410 free_domain_mem(domain); 3406 free_domain_mem(domain);
@@ -3513,7 +3509,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3513 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 3509 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3514 prot |= DMA_PTE_SNP; 3510 prot |= DMA_PTE_SNP;
3515 3511
3516 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); 3512 max_addr = iova + size;
3517 if (dmar_domain->max_addr < max_addr) { 3513 if (dmar_domain->max_addr < max_addr) {
3518 int min_agaw; 3514 int min_agaw;
3519 u64 end; 3515 u64 end;
@@ -3531,8 +3527,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3531 } 3527 }
3532 dmar_domain->max_addr = max_addr; 3528 dmar_domain->max_addr = max_addr;
3533 } 3529 }
3534 3530 /* Round up size to next multiple of PAGE_SIZE, if it and
3535 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); 3531 the low bits of hpa would take us onto the next page */
3532 size = aligned_nrpages(hpa, size);
3533 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3534 hpa >> VTD_PAGE_SHIFT, size, prot);
3536 return ret; 3535 return ret;
3537} 3536}
3538 3537
@@ -3540,15 +3539,12 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
3540 unsigned long iova, size_t size) 3539 unsigned long iova, size_t size)
3541{ 3540{
3542 struct dmar_domain *dmar_domain = domain->priv; 3541 struct dmar_domain *dmar_domain = domain->priv;
3543 dma_addr_t base;
3544 3542
3545 /* The address might not be aligned */ 3543 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3546 base = iova & VTD_PAGE_MASK; 3544 (iova + size - 1) >> VTD_PAGE_SHIFT);
3547 size = VTD_PAGE_ALIGN(size);
3548 dma_pte_clear_range(dmar_domain, base, base + size);
3549 3545
3550 if (dmar_domain->max_addr == base + size) 3546 if (dmar_domain->max_addr == iova + size)
3551 dmar_domain->max_addr = base; 3547 dmar_domain->max_addr = iova;
3552} 3548}
3553 3549
3554static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 3550static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3558,7 +3554,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3558 struct dma_pte *pte; 3554 struct dma_pte *pte;
3559 u64 phys = 0; 3555 u64 phys = 0;
3560 3556
3561 pte = addr_to_dma_pte(dmar_domain, iova); 3557 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3562 if (pte) 3558 if (pte)
3563 phys = dma_pte_addr(pte); 3559 phys = dma_pte_addr(pte);
3564 3560
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e9822..46dd440e2315 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
1/* 1/*
2 * Copyright (c) 2006, Intel Corporation. 2 * Copyright © 2006-2009, Intel Corporation.
3 * 3 *
4 * This file is released under the GPLv2. 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
5 * 16 *
6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 17 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 */ 18 */
9 19
@@ -123,7 +133,15 @@ move_left:
123 /* Insert the new_iova into domain rbtree by holding writer lock */ 133 /* Insert the new_iova into domain rbtree by holding writer lock */
124 /* Add new node and rebalance tree. */ 134 /* Add new node and rebalance tree. */
125 { 135 {
126 struct rb_node **entry = &((prev)), *parent = NULL; 136 struct rb_node **entry, *parent = NULL;
137
138 /* If we have 'prev', it's a valid place to start the
139 insertion. Otherwise, start from the root. */
140 if (prev)
141 entry = &prev;
142 else
143 entry = &iovad->rbroot.rb_node;
144
127 /* Figure out where to put new node */ 145 /* Figure out where to put new node */
128 while (*entry) { 146 while (*entry) {
129 struct iova *this = container_of(*entry, 147 struct iova *this = container_of(*entry,
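Note: the iova.c hunk fixes the rbtree insertion start point: descend from the cached hint only when the earlier search produced one, otherwise from the tree root; the old code always started from prev, even when it was NULL. A minimal sketch of that selection with a hand-rolled node type (not the kernel's rb_node API):

    #include <stdio.h>

    struct node { int key; struct node *left, *right; };

    int main(void)
    {
            struct node root_node = { 42, NULL, NULL };
            struct node *root = &root_node;
            struct node *prev = NULL;        /* hint left by the earlier search */
            struct node **entry;

            /* The fix: only use the hint when it exists. */
            if (prev)
                    entry = &prev;
            else
                    entry = &root;

            printf("insertion starts at node %d\n", (*entry)->key);
            return 0;
    }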
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index fee6a4022bc1..46dad12f952f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -355,10 +355,9 @@ config EEEPC_LAPTOP
355 depends on INPUT 355 depends on INPUT
356 depends on EXPERIMENTAL 356 depends on EXPERIMENTAL
357 depends on RFKILL || RFKILL = n 357 depends on RFKILL || RFKILL = n
358 depends on HOTPLUG_PCI
358 select BACKLIGHT_CLASS_DEVICE 359 select BACKLIGHT_CLASS_DEVICE
359 select HWMON 360 select HWMON
360 select HOTPLUG
361 select HOTPLUG_PCI if PCI
362 ---help--- 361 ---help---
363 This driver supports the Fn-Fx keys on Eee PC laptops. 362 This driver supports the Fn-Fx keys on Eee PC laptops.
364 363
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0d..a118eb0f1e67 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Blackfin On-Chip Real Time Clock Driver 2 * Blackfin On-Chip Real Time Clock Driver
3 * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789] 3 * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
4 * 4 *
5 * Copyright 2004-2008 Analog Devices Inc. 5 * Copyright 2004-2009 Analog Devices Inc.
6 * 6 *
7 * Enter bugs at http://blackfin.uclinux.org/ 7 * Enter bugs at http://blackfin.uclinux.org/
8 * 8 *
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
363 struct bfin_rtc *rtc; 363 struct bfin_rtc *rtc;
364 struct device *dev = &pdev->dev; 364 struct device *dev = &pdev->dev;
365 int ret = 0; 365 int ret = 0;
366 unsigned long timeout; 366 unsigned long timeout = jiffies + HZ;
367 367
368 dev_dbg_stamp(dev); 368 dev_dbg_stamp(dev);
369 369
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
374 platform_set_drvdata(pdev, rtc); 374 platform_set_drvdata(pdev, rtc);
375 device_init_wakeup(dev, 1); 375 device_init_wakeup(dev, 1);
376 376
377 /* Register our RTC with the RTC framework */
378 rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
379 THIS_MODULE);
380 if (unlikely(IS_ERR(rtc->rtc_dev))) {
381 ret = PTR_ERR(rtc->rtc_dev);
382 goto err;
383 }
384
377 /* Grab the IRQ and init the hardware */ 385 /* Grab the IRQ and init the hardware */
378 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); 386 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
379 if (unlikely(ret)) 387 if (unlikely(ret))
380 goto err; 388 goto err_reg;
381 /* sometimes the bootloader touched things, but the write complete was not 389 /* sometimes the bootloader touched things, but the write complete was not
382 * enabled, so let's just do a quick timeout here since the IRQ will not fire ... 390 * enabled, so let's just do a quick timeout here since the IRQ will not fire ...
383 */ 391 */
384 timeout = jiffies + HZ;
385 while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) 392 while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
386 if (time_after(jiffies, timeout)) 393 if (time_after(jiffies, timeout))
387 break; 394 break;
388 bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); 395 bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
389 bfin_write_RTC_SWCNT(0); 396 bfin_write_RTC_SWCNT(0);
390 397
391 /* Register our RTC with the RTC framework */
392 rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
393 if (unlikely(IS_ERR(rtc->rtc_dev))) {
394 ret = PTR_ERR(rtc->rtc_dev);
395 goto err_irq;
396 }
397
398 return 0; 398 return 0;
399 399
400 err_irq: 400err_reg:
401 free_irq(IRQ_RTC, dev); 401 rtc_device_unregister(rtc->rtc_dev);
402 err: 402err:
403 kfree(rtc); 403 kfree(rtc);
404 return ret; 404 return ret;
405} 405}
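Note: the rtc-bfin probe now registers the RTC device before requesting the shared RTC IRQ, so an interrupt that fires immediately cannot hit an unregistered rtc_dev, and the error labels unwind in the opposite order. A generic probe-ordering sketch with stand-in functions (names are illustrative, not the Blackfin driver's):

    #include <stdio.h>

    /* Stand-ins for the real calls; 0 means success, like the kernel helpers. */
    static int  register_device(void)   { puts("device registered");   return 0;  }
    static void unregister_device(void) { puts("device unregistered");            }
    static int  request_interrupt(void) { puts("irq request fails");   return -1; }

    static int probe(void)
    {
            int ret;

            ret = register_device();        /* 1: make the device usable ...  */
            if (ret)
                    goto err;

            ret = request_interrupt();      /* 2: ... only then allow IRQs    */
            if (ret)
                    goto err_unregister;

            return 0;

    err_unregister:                         /* unwind in reverse order */
            unregister_device();
    err:
            return ret;
    }

    int main(void) { return probe() ? 1 : 0; }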
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 74369a3f963b..c399f485aa7d 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/if_vlan.h>
16#include <net/dst.h> 17#include <net/dst.h>
17#include <net/tcp.h> 18#include <net/tcp.h>
18#include <scsi/scsi_cmnd.h> 19#include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
184 struct cxgb3i_adapter *snic; 185 struct cxgb3i_adapter *snic;
185 int i; 186 int i;
186 187
188 if (ndev->priv_flags & IFF_802_1Q_VLAN)
189 ndev = vlan_dev_real_dev(ndev);
190
187 read_lock(&cxgb3i_snic_rwlock); 191 read_lock(&cxgb3i_snic_rwlock);
188 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { 192 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
189 for (i = 0; i < snic->hba_cnt; i++) { 193 for (i = 0; i < snic->hba_cnt; i++) {
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index a84072865fc2..2c266c01dc5a 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
473 * limitation for the device. Try 40-bit first, and 473 * limitation for the device. Try 40-bit first, and
474 * fail to 32-bit. 474 * fail to 32-bit.
475 */ 475 */
476 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); 476 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
477 if (err) { 477 if (err) {
478 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 478 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
479 if (err) { 479 if (err) {
480 shost_printk(KERN_ERR, fnic->lport->host, 480 shost_printk(KERN_ERR, fnic->lport->host,
481 "No usable DMA configuration " 481 "No usable DMA configuration "
482 "aborting\n"); 482 "aborting\n");
483 goto err_out_release_regions; 483 goto err_out_release_regions;
484 } 484 }
485 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 485 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
486 if (err) { 486 if (err) {
487 shost_printk(KERN_ERR, fnic->lport->host, 487 shost_printk(KERN_ERR, fnic->lport->host,
488 "Unable to obtain 32-bit DMA " 488 "Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
490 goto err_out_release_regions; 490 goto err_out_release_regions;
491 } 491 }
492 } else { 492 } else {
493 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); 493 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
494 if (err) { 494 if (err) {
495 shost_printk(KERN_ERR, fnic->lport->host, 495 shost_printk(KERN_ERR, fnic->lport->host,
496 "Unable to obtain 40-bit DMA " 496 "Unable to obtain 40-bit DMA "
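Note: the fnic hunk swaps the old DMA_40BIT_MASK/DMA_32BIT_MASK constants for DMA_BIT_MASK(n), which builds an all-ones mask of the requested width. The sketch below shows the equivalent expression for widths below 64 bits (the real kernel macro also special-cases n == 64):

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent of DMA_BIT_MASK(n) for 0 < n < 64. */
    #define BIT_MASK_OF(n) ((1ULL << (n)) - 1)

    int main(void)
    {
            printf("40-bit mask: 0x%llx\n", (unsigned long long)BIT_MASK_OF(40));
            printf("32-bit mask: 0x%llx\n", (unsigned long long)BIT_MASK_OF(32));
            return 0;
    }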
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index eabf36502856..bfc996971b81 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
245 struct vnic_wq_copy *wq, 245 struct vnic_wq_copy *wq,
246 struct fnic_io_req *io_req, 246 struct fnic_io_req *io_req,
247 struct scsi_cmnd *sc, 247 struct scsi_cmnd *sc,
248 u32 sg_count) 248 int sg_count)
249{ 249{
250 struct scatterlist *sg; 250 struct scatterlist *sg;
251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); 251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
260 char msg[2]; 260 char msg[2];
261 261
262 if (sg_count) { 262 if (sg_count) {
263 BUG_ON(sg_count < 0);
264 BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
265
266 /* For each SGE, create a device desc entry */ 263 /* For each SGE, create a device desc entry */
267 desc = io_req->sgl_list; 264 desc = io_req->sgl_list;
268 for_each_sg(scsi_sglist(sc), sg, sg_count, i) { 265 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
344 struct fnic *fnic; 341 struct fnic *fnic;
345 struct vnic_wq_copy *wq; 342 struct vnic_wq_copy *wq;
346 int ret; 343 int ret;
347 u32 sg_count; 344 int sg_count;
348 unsigned long flags; 345 unsigned long flags;
349 unsigned long ptr; 346 unsigned long ptr;
350 347
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 869a11bdccbd..9928704e235f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1095 MAX_INDIRECT_BUFS); 1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; 1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 } 1097 }
1098
1099 if (hostdata->madapter_info.os_type == 3) {
1100 enable_fast_fail(hostdata);
1101 return;
1102 }
1098 } 1103 }
1099 1104
1100 enable_fast_fail(hostdata); 1105 send_srp_login(hostdata);
1101} 1106}
1102 1107
1103/** 1108/**
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2eee9e6e4fe8..292c02f810d0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3670,13 +3670,14 @@ static void
3670fc_bsg_goose_queue(struct fc_rport *rport) 3670fc_bsg_goose_queue(struct fc_rport *rport)
3671{ 3671{
3672 int flagset; 3672 int flagset;
3673 unsigned long flags;
3673 3674
3674 if (!rport->rqst_q) 3675 if (!rport->rqst_q)
3675 return; 3676 return;
3676 3677
3677 get_device(&rport->dev); 3678 get_device(&rport->dev);
3678 3679
3679 spin_lock(rport->rqst_q->queue_lock); 3680 spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
3680 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && 3681 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3681 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3682 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3682 if (flagset) 3683 if (flagset)
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3684 __blk_run_queue(rport->rqst_q); 3685 __blk_run_queue(rport->rqst_q);
3685 if (flagset) 3686 if (flagset)
3686 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3687 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3687 spin_unlock(rport->rqst_q->queue_lock); 3688 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3688 3689
3689 put_device(&rport->dev); 3690 put_device(&rport->dev);
3690} 3691}
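Note: fc_bsg_goose_queue() now takes the queue lock with spin_lock_irqsave()/spin_unlock_irqrestore(), so the unlock restores the caller's interrupt state instead of unconditionally leaving interrupts as they were around a plain lock. A toy user-space model of why the save/restore pair matters; this only illustrates the pattern, it is not kernel code:

    #include <stdio.h>

    static int irqs_enabled = 1;   /* toy stand-in for the CPU interrupt flag */

    static void lock_irqsave(unsigned long *flags)
    {
            *flags = irqs_enabled;
            irqs_enabled = 0;            /* "lock" and disable interrupts */
    }

    static void unlock_irqrestore(unsigned long flags)
    {
            irqs_enabled = (int)flags;   /* restore the caller's state, not "on" */
    }

    int main(void)
    {
            unsigned long flags;

            irqs_enabled = 0;            /* caller already disabled interrupts */
            lock_irqsave(&flags);
            unlock_irqrestore(flags);
            printf("interrupts %s\n", irqs_enabled ? "enabled" : "still disabled");
            return 0;
    }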
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4daa..ef142fd47a83 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
210static int sg_allow_access(struct file *filp, unsigned char *cmd) 210static int sg_allow_access(struct file *filp, unsigned char *cmd)
211{ 211{
212 struct sg_fd *sfp = (struct sg_fd *)filp->private_data; 212 struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
213 struct request_queue *q = sfp->parentdp->device->request_queue;
214 213
215 if (sfp->parentdp->device->type == TYPE_SCANNER) 214 if (sfp->parentdp->device->type == TYPE_SCANNER)
216 return 0; 215 return 0;
217 216
218 return blk_verify_command(&q->cmd_filter, 217 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
219 cmd, filp->f_mode & FMODE_WRITE);
220} 218}
221 219
222static int 220static int
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97f3158fa7b5..27e84e4b1fa9 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
134 134
135 host = ncr_attach(&zalon7xx_template, unit, &device); 135 host = ncr_attach(&zalon7xx_template, unit, &device);
136 if (!host) 136 if (!host)
137 goto fail; 137 return -ENODEV;
138 138
139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { 139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
140 dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", 140 dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a07015d646dd..6160e03f410c 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -759,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev)
759 /* subdevice 0x00PS means <P> parallel, <S> serial */ 759 /* subdevice 0x00PS means <P> parallel, <S> serial */
760 unsigned int num_serial = dev->subsystem_device & 0xf; 760 unsigned int num_serial = dev->subsystem_device & 0xf;
761 761
762 if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
763 return 0;
762 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && 764 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
763 dev->subsystem_device == 0x0299) 765 dev->subsystem_device == 0x0299)
764 return 0; 766 return 0;
@@ -3557,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
3557 PCI_VENDOR_ID_IBM, 0x0299, 3559 PCI_VENDOR_ID_IBM, 0x0299,
3558 0, 0, pbn_b0_bt_2_115200 }, 3560 0, 0, pbn_b0_bt_2_115200 },
3559 3561
3562 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3563 0xA000, 0x1000,
3564 0, 0, pbn_b0_1_115200 },
3565
3560 /* 3566 /*
3561 * These entries match devices with class COMMUNICATION_SERIAL, 3567 * These entries match devices with class COMMUNICATION_SERIAL,
3562 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 3568 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb37066..8980a5640bd9 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
514 /* the spi->mode bits understood by this driver: */ 514 /* the spi->mode bits understood by this driver: */
515 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 515 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
516 516
517 master->flags = SPI_MASTER_HALF_DUPLEX;
518
517 master->bus_num = 2; /* "official" */ 519 master->bus_num = 2; /* "official" */
518 master->num_chipselect = 4; 520 master->num_chipselect = 4;
519 master->setup = uwire_setup; 521 master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e857..f1db395dd889 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
258 struct spi_bitbang *bitbang = 258 struct spi_bitbang *bitbang =
259 container_of(work, struct spi_bitbang, work); 259 container_of(work, struct spi_bitbang, work);
260 unsigned long flags; 260 unsigned long flags;
261 int do_setup = -1;
262 int (*setup_transfer)(struct spi_device *,
263 struct spi_transfer *);
264
265 setup_transfer = bitbang->setup_transfer;
261 266
262 spin_lock_irqsave(&bitbang->lock, flags); 267 spin_lock_irqsave(&bitbang->lock, flags);
263 bitbang->busy = 1; 268 bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
269 unsigned tmp; 274 unsigned tmp;
270 unsigned cs_change; 275 unsigned cs_change;
271 int status; 276 int status;
272 int (*setup_transfer)(struct spi_device *,
273 struct spi_transfer *);
274 277
275 m = container_of(bitbang->queue.next, struct spi_message, 278 m = container_of(bitbang->queue.next, struct spi_message,
276 queue); 279 queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
287 tmp = 0; 290 tmp = 0;
288 cs_change = 1; 291 cs_change = 1;
289 status = 0; 292 status = 0;
290 setup_transfer = NULL;
291 293
292 list_for_each_entry (t, &m->transfers, transfer_list) { 294 list_for_each_entry (t, &m->transfers, transfer_list) {
293 295
294 /* override or restore speed and wordsize */ 296 /* override speed or wordsize? */
295 if (t->speed_hz || t->bits_per_word) { 297 if (t->speed_hz || t->bits_per_word)
296 setup_transfer = bitbang->setup_transfer; 298 do_setup = 1;
299
300 /* init (-1) or override (1) transfer params */
301 if (do_setup != 0) {
297 if (!setup_transfer) { 302 if (!setup_transfer) {
298 status = -ENOPROTOOPT; 303 status = -ENOPROTOOPT;
299 break; 304 break;
300 } 305 }
301 }
302 if (setup_transfer) {
303 status = setup_transfer(spi, t); 306 status = setup_transfer(spi, t);
304 if (status < 0) 307 if (status < 0)
305 break; 308 break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
363 m->status = status; 366 m->status = status;
364 m->complete(m->context); 367 m->complete(m->context);
365 368
366 /* restore speed and wordsize */ 369 /* restore speed and wordsize if it was overridden */
367 if (setup_transfer) 370 if (do_setup == 1)
368 setup_transfer(spi, NULL); 371 setup_transfer(spi, NULL);
372 do_setup = 0;
369 373
370 /* normally deactivate chipselect ... unless no error and 374 /* normally deactivate chipselect ... unless no error and
371 * cs_change has hinted that the next message will probably 375 * cs_change has hinted that the next message will probably
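Note: the spi_bitbang rework replaces the per-message setup_transfer pointer juggling with a three-state do_setup flag: -1 for the first message (always call setup), 1 when a transfer overrode speed/wordsize (call setup, then restore afterwards), 0 when the defaults are in effect. A small stand-alone sketch of that state machine, with a stand-in setup call:

    #include <stdio.h>

    static int setup_transfer(const char *what)
    {
            printf("setup_transfer: %s\n", what);
            return 0;
    }

    int main(void)
    {
            /* -1: first message, params never applied yet
             *  0: controller is running with the default params
             *  1: the current message overrode speed/wordsize */
            int do_setup = -1;
            int overrides[] = { 0, 1, 0 };   /* per message: any override? */

            for (int i = 0; i < 3; i++) {
                    if (overrides[i])
                            do_setup = 1;
                    if (do_setup != 0)       /* init (-1) or override (1) */
                            setup_transfer(overrides[i] ? "per-transfer params"
                                                        : "default params");

                    /* ... the message's transfers would run here ... */

                    if (do_setup == 1)       /* undo the override afterwards */
                            setup_transfer("restore defaults");
                    do_setup = 0;
            }
            return 0;
    }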
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb2..606e7a40a8da 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
58 58
59 59
60/* Bit masks for spi_device.mode management. Note that incorrect 60/* Bit masks for spi_device.mode management. Note that incorrect
61 * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other 61 * settings for some settings can cause *lots* of trouble for other
62 * devices on a shared bus: CS_HIGH, because this device will be 62 * devices on a shared bus:
63 * active when it shouldn't be; 3WIRE, because when active it won't
64 * behave as it should.
65 * 63 *
66 * REVISIT should changing those two modes be privileged? 64 * - CS_HIGH ... this device will be active when it shouldn't be
65 * - 3WIRE ... when active, it won't behave as it should
66 * - NO_CS ... there will be no explicit message boundaries; this
67 * is completely incompatible with the shared bus model
68 * - READY ... transfers may proceed when they shouldn't.
69 *
70 * REVISIT should changing those flags be privileged?
67 */ 71 */
68#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ 72#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
69 | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP) 73 | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
74 | SPI_NO_CS | SPI_READY)
70 75
71struct spidev_data { 76struct spidev_data {
72 dev_t devt; 77 dev_t devt;
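Note: SPI_MODE_MASK now also admits SPI_NO_CS and SPI_READY, and spidev validates user-requested mode bits against this mask before applying them. A sketch of that validation step; the bit values below are illustrative stand-ins, the real definitions live in <linux/spi/spi.h>:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit positions only. */
    #define SPI_CPHA      0x01
    #define SPI_CPOL      0x02
    #define SPI_CS_HIGH   0x04
    #define SPI_LSB_FIRST 0x08
    #define SPI_3WIRE     0x10
    #define SPI_LOOP      0x20
    #define SPI_NO_CS     0x40
    #define SPI_READY     0x80

    #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | \
                           SPI_3WIRE | SPI_LOOP | SPI_NO_CS | SPI_READY)

    static int check_mode(uint32_t requested)
    {
            if (requested & ~(uint32_t)SPI_MODE_MASK)
                    return -1;      /* unknown/unsupported bit requested */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", check_mode(SPI_CPOL | SPI_NO_CS), check_mode(0x100));
            return 0;
    }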
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c6..497ff8af03ed 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
2414 if (err) 2414 if (err)
2415 return err; 2415 return err;
2416 memset(fix, 0, sizeof(struct fb_fix_screeninfo)); 2416 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
2417 return fbhw->encode_fix(fix, &par); 2417 mutex_lock(&info->mm_lock);
2418 err = fbhw->encode_fix(fix, &par);
2419 mutex_unlock(&info->mm_lock);
2420 return err;
2418} 2421}
2419 2422
2420static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info) 2423static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info)
2743 2746
2744 /* Decode wanted screen parameters */ 2747 /* Decode wanted screen parameters */
2745 fbhw->decode_var(&info->var, par); 2748 fbhw->decode_var(&info->var, par);
2749 mutex_lock(&info->mm_lock);
2746 fbhw->encode_fix(&info->fix, par); 2750 fbhw->encode_fix(&info->fix, par);
2751 mutex_unlock(&info->mm_lock);
2747 2752
2748 /* Set new videomode */ 2753 /* Set new videomode */
2749 ata_set_par(par); 2754 ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f55..cb88394ba995 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
270 270
271 smem_len = (var->xres_virtual * var->yres_virtual 271 smem_len = (var->xres_virtual * var->yres_virtual
272 * ((var->bits_per_pixel + 7) / 8)); 272 * ((var->bits_per_pixel + 7) / 8));
273 mutex_lock(&info->mm_lock);
273 info->fix.smem_len = max(smem_len, sinfo->smem_len); 274 info->fix.smem_len = max(smem_len, sinfo->smem_len);
275 mutex_unlock(&info->mm_lock);
274 276
275 info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, 277 info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
276 (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); 278 (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
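Note: both the atafb and atmel_lcdfb hunks wrap updates of the fb_fix_screeninfo fields (smem_len, encode_fix output) in info->mm_lock, so they cannot race with mmap() of the framebuffer. A toy user-space model of the same pattern, with a pthread mutex standing in for mm_lock:

    #include <pthread.h>
    #include <stdio.h>

    /* Fields that the mmap path also reads are only updated under mm_lock. */
    struct fb_fix { unsigned long smem_start, smem_len, line_length; };

    static pthread_mutex_t mm_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fb_fix fix;

    static void update_fix(unsigned long smem_len, unsigned long line_length)
    {
            pthread_mutex_lock(&mm_lock);
            fix.smem_len = smem_len;
            fix.line_length = line_length;
            pthread_mutex_unlock(&mm_lock);
    }

    int main(void)
    {
            update_fix(8UL << 20, 1280 * 4);
            printf("smem_len=%lu line_length=%lu\n", fix.smem_len, fix.line_length);
            return 0;
    }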
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d3..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
187 int mtrr_reg; 187 int mtrr_reg;
188#endif 188#endif
189 u32 mem_cntl; 189 u32 mem_cntl;
190 struct crtc saved_crtc;
191 union aty_pll saved_pll;
190}; 192};
191 193
192 /* 194 /*
@@ -217,6 +219,7 @@ struct atyfb_par {
217#define M64F_XL_DLL 0x00080000 219#define M64F_XL_DLL 0x00080000
218#define M64F_MFB_FORCE_4 0x00100000 220#define M64F_MFB_FORCE_4 0x00100000
219#define M64F_HW_TRIPLE 0x00200000 221#define M64F_HW_TRIPLE 0x00200000
222#define M64F_XL_MEM 0x00400000
220 /* 223 /*
221 * Register access 224 * Register access
222 */ 225 */
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30b..63d3739d43a8 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
66#include <linux/spinlock.h> 66#include <linux/spinlock.h>
67#include <linux/wait.h> 67#include <linux/wait.h>
68#include <linux/backlight.h> 68#include <linux/backlight.h>
69#include <linux/reboot.h>
70#include <linux/dmi.h>
69 71
70#include <asm/io.h> 72#include <asm/io.h>
71#include <linux/uaccess.h> 73#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
249static int store_video_par(char *videopar, unsigned char m64_num); 251static int store_video_par(char *videopar, unsigned char m64_num);
250#endif 252#endif
251 253
252static struct crtc saved_crtc;
253static union aty_pll saved_pll;
254static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); 254static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
255 255
256static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); 256static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
261static int read_aty_sense(const struct atyfb_par *par); 261static int read_aty_sense(const struct atyfb_par *par);
262#endif 262#endif
263 263
264static DEFINE_MUTEX(reboot_lock);
265static struct fb_info *reboot_info;
264 266
265 /* 267 /*
266 * Interface used by the world 268 * Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
361#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) 363#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
362#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) 364#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
363 365
364#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4) 366#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
365#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS) 367#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
366 368
367static struct { 369static struct {
368 u16 pci_id; 370 u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
539static char ram_sdram[] __devinitdata = "SDRAM (1:1)"; 541static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
540static char ram_sgram[] __devinitdata = "SGRAM (1:1)"; 542static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
541static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)"; 543static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
544static char ram_wram[] __devinitdata = "WRAM";
542static char ram_off[] __devinitdata = "OFF"; 545static char ram_off[] __devinitdata = "OFF";
543#endif /* CONFIG_FB_ATY_CT */ 546#endif /* CONFIG_FB_ATY_CT */
544 547
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
553#ifdef CONFIG_FB_ATY_CT 556#ifdef CONFIG_FB_ATY_CT
554static char *aty_ct_ram[8] __devinitdata = { 557static char *aty_ct_ram[8] __devinitdata = {
555 ram_off, ram_dram, ram_edo, ram_edo, 558 ram_off, ram_dram, ram_edo, ram_edo,
559 ram_sdram, ram_sgram, ram_wram, ram_resv
560};
561static char *aty_xl_ram[8] __devinitdata = {
562 ram_off, ram_dram, ram_edo, ram_edo,
556 ram_sdram, ram_sgram, ram_sdram32, ram_resv 563 ram_sdram, ram_sgram, ram_sdram32, ram_resv
557}; 564};
558#endif /* CONFIG_FB_ATY_CT */ 565#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
760#endif /* CONFIG_FB_ATY_GENERIC_LCD */ 767#endif /* CONFIG_FB_ATY_GENERIC_LCD */
761} 768}
762 769
770static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
771{
772 u32 line_length = vxres * bpp / 8;
773
774 if (par->ram_type == SGRAM ||
775 (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
776 line_length = (line_length + 63) & ~63;
777
778 return line_length;
779}
780
763static int aty_var_to_crtc(const struct fb_info *info, 781static int aty_var_to_crtc(const struct fb_info *info,
764 const struct fb_var_screeninfo *var, struct crtc *crtc) 782 const struct fb_var_screeninfo *var, struct crtc *crtc)
765{ 783{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
769 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; 787 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
770 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; 788 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
771 u32 pix_width, dp_pix_width, dp_chain_mask; 789 u32 pix_width, dp_pix_width, dp_chain_mask;
790 u32 line_length;
772 791
773 /* input */ 792 /* input */
774 xres = var->xres; 793 xres = (var->xres + 7) & ~7;
775 yres = var->yres; 794 yres = var->yres;
776 vxres = var->xres_virtual; 795 vxres = (var->xres_virtual + 7) & ~7;
777 vyres = var->yres_virtual; 796 vyres = var->yres_virtual;
778 xoffset = var->xoffset; 797 xoffset = (var->xoffset + 7) & ~7;
779 yoffset = var->yoffset; 798 yoffset = var->yoffset;
780 bpp = var->bits_per_pixel; 799 bpp = var->bits_per_pixel;
781 if (bpp == 16) 800 if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
827 } else 846 } else
828 FAIL("invalid bpp"); 847 FAIL("invalid bpp");
829 848
830 if (vxres * vyres * bpp / 8 > info->fix.smem_len) 849 line_length = calc_line_length(par, vxres, bpp);
850
851 if (vyres * line_length > info->fix.smem_len)
831 FAIL("not enough video RAM"); 852 FAIL("not enough video RAM");
832 853
833 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; 854 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
969 crtc->xoffset = xoffset; 990 crtc->xoffset = xoffset;
970 crtc->yoffset = yoffset; 991 crtc->yoffset = yoffset;
971 crtc->bpp = bpp; 992 crtc->bpp = bpp;
972 crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19); 993 crtc->off_pitch =
994 ((yoffset * line_length + xoffset * bpp / 8) / 8) |
995 ((line_length / bpp) << 22);
973 crtc->vline_crnt_vline = 0; 996 crtc->vline_crnt_vline = 0;
974 997
975 crtc->h_tot_disp = h_total | (h_disp<<16); 998 crtc->h_tot_disp = h_total | (h_disp<<16);
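Note: with calc_line_length(), the pitch is derived from the (possibly 64-byte aligned) line length instead of assuming vxres * bpp / 8, and off_pitch packs the byte offset of the visible origin (in 8-byte units) together with the pitch in the high bits. A standalone arithmetic sketch with sample values, mirroring the expressions added in this hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors calc_line_length() from the patch: some memory types need the
     * line length rounded up to a multiple of 64 bytes. */
    static uint32_t calc_line_length(int needs_64b_align, uint32_t vxres, uint32_t bpp)
    {
            uint32_t line_length = vxres * bpp / 8;

            if (needs_64b_align)
                    line_length = (line_length + 63) & ~63u;
            return line_length;
    }

    int main(void)
    {
            uint32_t vxres = 1152, bpp = 16;
            uint32_t xoffset = 8, yoffset = 32;
            uint32_t line_length = calc_line_length(1, vxres, bpp);

            /* Byte offset of the origin in 8-byte units, plus the pitch in
             * units of 8 pixels shifted into bits 22 and up. */
            uint32_t off_pitch = ((yoffset * line_length + xoffset * bpp / 8) / 8)
                                 | ((line_length / bpp) << 22);

            printf("line_length=%u off_pitch=0x%x\n", line_length, off_pitch);
            return 0;
    }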
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
1394 } 1417 }
1395 aty_st_8(DAC_MASK, 0xff, par); 1418 aty_st_8(DAC_MASK, 0xff, par);
1396 1419
1397 info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8; 1420 info->fix.line_length = calc_line_length(par, var->xres_virtual,
1421 var->bits_per_pixel);
1422
1398 info->fix.visual = var->bits_per_pixel <= 8 ? 1423 info->fix.visual = var->bits_per_pixel <= 8 ?
1399 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; 1424 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
1400 1425
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
1505{ 1530{
1506 u32 xoffset = info->var.xoffset; 1531 u32 xoffset = info->var.xoffset;
1507 u32 yoffset = info->var.yoffset; 1532 u32 yoffset = info->var.yoffset;
1508 u32 vxres = par->crtc.vxres; 1533 u32 line_length = info->fix.line_length;
1509 u32 bpp = info->var.bits_per_pixel; 1534 u32 bpp = info->var.bits_per_pixel;
1510 1535
1511 par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19); 1536 par->crtc.off_pitch =
1537 ((yoffset * line_length + xoffset * bpp / 8) / 8) |
1538 ((line_length / bpp) << 22);
1512} 1539}
1513 1540
1514 1541
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2201 const int *refresh_tbl; 2228 const int *refresh_tbl;
2202 int i, size; 2229 int i, size;
2203 2230
2204 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { 2231 if (M64_HAS(XL_MEM)) {
2205 refresh_tbl = ragexl_tbl; 2232 refresh_tbl = ragexl_tbl;
2206 size = ARRAY_SIZE(ragexl_tbl); 2233 size = ARRAY_SIZE(ragexl_tbl);
2207 } else { 2234 } else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
2335 par->pll_ops = &aty_pll_ct; 2362 par->pll_ops = &aty_pll_ct;
2336 par->bus_type = PCI; 2363 par->bus_type = PCI;
2337 par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07); 2364 par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
2338 ramname = aty_ct_ram[par->ram_type]; 2365 if (M64_HAS(XL_MEM))
2366 ramname = aty_xl_ram[par->ram_type];
2367 else
2368 ramname = aty_ct_ram[par->ram_type];
2339 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ 2369 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
2340 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) 2370 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
2341 par->pll_limits.mclk = 63; 2371 par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
2390#endif /* CONFIG_FB_ATY_CT */ 2420#endif /* CONFIG_FB_ATY_CT */
2391 2421
2392 /* save previous video mode */ 2422 /* save previous video mode */
2393 aty_get_crtc(par, &saved_crtc); 2423 aty_get_crtc(par, &par->saved_crtc);
2394 if(par->pll_ops->get_pll) 2424 if(par->pll_ops->get_pll)
2395 par->pll_ops->get_pll(info, &saved_pll); 2425 par->pll_ops->get_pll(info, &par->saved_pll);
2396 2426
2397 par->mem_cntl = aty_ld_le32(MEM_CNTL, par); 2427 par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
2398 gtb_memsize = M64_HAS(GTB_DSP); 2428 gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
2667 2697
2668aty_init_exit: 2698aty_init_exit:
2669 /* restore video mode */ 2699 /* restore video mode */
2670 aty_set_crtc(par, &saved_crtc); 2700 aty_set_crtc(par, &par->saved_crtc);
2671 par->pll_ops->set_pll(info, &saved_pll); 2701 par->pll_ops->set_pll(info, &par->saved_pll);
2672 2702
2673#ifdef CONFIG_MTRR 2703#ifdef CONFIG_MTRR
2674 if (par->mtrr_reg >= 0) { 2704 if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3502 par->mmap_map[1].prot_flag = _PAGE_E; 3532 par->mmap_map[1].prot_flag = _PAGE_E;
3503#endif /* __sparc__ */ 3533#endif /* __sparc__ */
3504 3534
3535 mutex_lock(&reboot_lock);
3536 if (!reboot_info)
3537 reboot_info = info;
3538 mutex_unlock(&reboot_lock);
3539
3505 return 0; 3540 return 0;
3506 3541
3507err_release_io: 3542err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
3614 struct atyfb_par *par = (struct atyfb_par *) info->par; 3649 struct atyfb_par *par = (struct atyfb_par *) info->par;
3615 3650
3616 /* restore video mode */ 3651 /* restore video mode */
3617 aty_set_crtc(par, &saved_crtc); 3652 aty_set_crtc(par, &par->saved_crtc);
3618 par->pll_ops->set_pll(info, &saved_pll); 3653 par->pll_ops->set_pll(info, &par->saved_pll);
3619 3654
3620 unregister_framebuffer(info); 3655 unregister_framebuffer(info);
3621 3656
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
3661{ 3696{
3662 struct fb_info *info = pci_get_drvdata(pdev); 3697 struct fb_info *info = pci_get_drvdata(pdev);
3663 3698
3699 mutex_lock(&reboot_lock);
3700 if (reboot_info == info)
3701 reboot_info = NULL;
3702 mutex_unlock(&reboot_lock);
3703
3664 atyfb_remove(info); 3704 atyfb_remove(info);
3665} 3705}
3666 3706
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
3808} 3848}
3809#endif /* MODULE */ 3849#endif /* MODULE */
3810 3850
3851static int atyfb_reboot_notify(struct notifier_block *nb,
3852 unsigned long code, void *unused)
3853{
3854 struct atyfb_par *par;
3855
3856 if (code != SYS_RESTART)
3857 return NOTIFY_DONE;
3858
3859 mutex_lock(&reboot_lock);
3860
3861 if (!reboot_info)
3862 goto out;
3863
3864 if (!lock_fb_info(reboot_info))
3865 goto out;
3866
3867 par = reboot_info->par;
3868
3869 /*
3870 * HP OmniBook 500's BIOS doesn't like the state of the
3871 * hardware after atyfb has been used. Restore the hardware
3872 * to the original state to allow successful reboots.
3873 */
3874 aty_set_crtc(par, &par->saved_crtc);
3875 par->pll_ops->set_pll(reboot_info, &par->saved_pll);
3876
3877 unlock_fb_info(reboot_info);
3878 out:
3879 mutex_unlock(&reboot_lock);
3880
3881 return NOTIFY_DONE;
3882}
3883
3884static struct notifier_block atyfb_reboot_notifier = {
3885 .notifier_call = atyfb_reboot_notify,
3886};
3887
3888static const struct dmi_system_id atyfb_reboot_ids[] = {
3889 {
3890 .ident = "HP OmniBook 500",
3891 .matches = {
3892 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3893 DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
3894 DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
3895 },
3896 },
3897
3898 { }
3899};
3900
3811static int __init atyfb_init(void) 3901static int __init atyfb_init(void)
3812{ 3902{
3813 int err1 = 1, err2 = 1; 3903 int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
3826 err2 = atyfb_atari_probe(); 3916 err2 = atyfb_atari_probe();
3827#endif 3917#endif
3828 3918
3829 return (err1 && err2) ? -ENODEV : 0; 3919 if (err1 && err2)
3920 return -ENODEV;
3921
3922 if (dmi_check_system(atyfb_reboot_ids))
3923 register_reboot_notifier(&atyfb_reboot_notifier);
3924
3925 return 0;
3830} 3926}
3831 3927
3832static void __exit atyfb_exit(void) 3928static void __exit atyfb_exit(void)
3833{ 3929{
3930 if (dmi_check_system(atyfb_reboot_ids))
3931 unregister_reboot_notifier(&atyfb_reboot_notifier);
3932
3834#ifdef CONFIG_PCI 3933#ifdef CONFIG_PCI
3835 pci_unregister_driver(&atyfb_driver); 3934 pci_unregister_driver(&atyfb_driver);
3836#endif 3935#endif
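
The atyfb hunks above restore the saved CRTC/PLL state from a reboot notifier that is registered only on DMI-matched machines whose BIOS chokes on the driver-programmed hardware. A minimal sketch of that notifier-plus-DMI-table pattern follows; the example_* names and the vendor/product strings are placeholders, not atyfb symbols.

#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical hook standing in for "put the hardware back into the
 * state the firmware expects before it regains control". */
static void example_restore_hw(void)
{
}

static int example_reboot_notify(struct notifier_block *nb,
				 unsigned long code, void *unused)
{
	/* Only act on a real restart, not halt or power-off. */
	if (code == SYS_RESTART)
		example_restore_hw();
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_notifier = {
	.notifier_call = example_reboot_notify,
};

/* Match only the machines that need the workaround. */
static const struct dmi_system_id example_reboot_ids[] = {
	{
		.ident = "Example vendor board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},
	{ }
};

static int __init example_init(void)
{
	if (dmi_check_system(example_reboot_ids))
		register_reboot_notifier(&example_reboot_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	if (dmi_check_system(example_reboot_ids))
		unregister_reboot_notifier(&example_reboot_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Gating both register and unregister on the same dmi_check_system() result keeps the notifier entirely off machines that do not need the workaround.
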
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a2..51fcc0a2c94a 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
63void aty_init_engine(struct atyfb_par *par, struct fb_info *info) 63void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
64{ 64{
65 u32 pitch_value; 65 u32 pitch_value;
66 u32 vxres;
66 67
67 /* determine modal information from global mode structure */ 68 /* determine modal information from global mode structure */
68 pitch_value = info->var.xres_virtual; 69 pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
70 vxres = info->var.xres_virtual;
69 71
70 if (info->var.bits_per_pixel == 24) { 72 if (info->var.bits_per_pixel == 24) {
71 /* In 24 bpp, the engine is in 8 bpp - this requires that all */ 73 /* In 24 bpp, the engine is in 8 bpp - this requires that all */
72 /* horizontal coordinates and widths must be adjusted */ 74 /* horizontal coordinates and widths must be adjusted */
73 pitch_value *= 3; 75 pitch_value *= 3;
76 vxres *= 3;
74 } 77 }
75 78
76 /* On GTC (RagePro), we need to reset the 3D engine before */ 79 /* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
133 aty_st_le32(SC_LEFT, 0, par); 136 aty_st_le32(SC_LEFT, 0, par);
134 aty_st_le32(SC_TOP, 0, par); 137 aty_st_le32(SC_TOP, 0, par);
135 aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); 138 aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
136 aty_st_le32(SC_RIGHT, pitch_value - 1, par); 139 aty_st_le32(SC_RIGHT, vxres - 1, par);
137 140
138 /* set background color to minimum value (usually BLACK) */ 141 /* set background color to minimum value (usually BLACK) */
139 aty_st_le32(DP_BKGD_CLR, 0, par); 142 aty_st_le32(DP_BKGD_CLR, 0, par);
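
The mach64_accel.c hunk derives the engine pitch from the real scanline stride (line_length divided by bytes per pixel) and keeps a separate vxres for the scissor right edge, tripling both in 24 bpp because the engine runs in 8 bpp units. A small sketch of the arithmetic with a made-up, padded mode (the 5632-byte stride and 1280-pixel width are assumptions, not values from the patch):

/* Hypothetical mode: 32 bpp, scanline padded to 5632 bytes,
 * 1280 visible/virtual pixels wide. */
static void example_engine_geometry(void)
{
	unsigned int line_length = 5632;	/* bytes per scanline, incl. padding */
	unsigned int bits_per_pixel = 32;
	unsigned int xres_virtual = 1280;

	/* Engine pitch must come from the real stride ...              */
	unsigned int pitch_value = line_length / (bits_per_pixel / 8);	/* 1408 */
	/* ... while the scissor right edge is the virtual width.       */
	unsigned int vxres = xres_virtual;				/* 1280 */

	/* Using xres_virtual (1280) as the pitch would step the engine
	 * 1280 pixels per line while memory rows are 1408 pixels apart,
	 * skewing blits; clipping at the pitch instead of vxres would
	 * let the engine write into the padding.  In 24 bpp both values
	 * are additionally multiplied by 3. */
	(void)pitch_value;
	(void)vxres;
}
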
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6b..51422fc4f606 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
356 lcd->power = FB_BLANK_POWERDOWN; 356 lcd->power = FB_BLANK_POWERDOWN;
357 lcd->mode = MODE_VGA; /* default to VGA */ 357 lcd->mode = MODE_VGA; /* default to VGA */
358 358
359 lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL)); 359 lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
360 if (lcd->buf == NULL) { 360 if (lcd->buf == NULL) {
361 kfree(lcd); 361 kfree(lcd);
362 return -ENOMEM; 362 return -ENOMEM;
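
The tdo24m.c hunk fixes a kmalloc() call whose second argument was sizeof(GFP_KERNEL), i.e. the width of the flag constant (typically 4), rather than the gfp_t flags themselves. A minimal sketch of the corrected call; EXAMPLE_BUF_SIZE and example_alloc() are hypothetical names:

#include <linux/slab.h>
#include <linux/gfp.h>

#define EXAMPLE_BUF_SIZE 1024	/* hypothetical size, not the driver's */

static void *example_alloc(void)
{
	/* Wrong: sizeof(GFP_KERNEL) is just the size of the constant's
	 * type, so the allocator is handed a bogus flags value:
	 *
	 *	buf = kmalloc(EXAMPLE_BUF_SIZE, sizeof(GFP_KERNEL));
	 *
	 * Right: pass the gfp_t flags directly.
	 */
	return kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);
}
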
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0cd..53ea05645ff8 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1310 1310
1311static int 1311static int
1312fb_mmap(struct file *file, struct vm_area_struct * vma) 1312fb_mmap(struct file *file, struct vm_area_struct * vma)
1313__acquires(&info->lock)
1314__releases(&info->lock)
1315{ 1313{
1316 int fbidx = iminor(file->f_path.dentry->d_inode); 1314 int fbidx = iminor(file->f_path.dentry->d_inode);
1317 struct fb_info *info = registered_fb[fbidx]; 1315 struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
1325 off = vma->vm_pgoff << PAGE_SHIFT; 1323 off = vma->vm_pgoff << PAGE_SHIFT;
1326 if (!fb) 1324 if (!fb)
1327 return -ENODEV; 1325 return -ENODEV;
1326 mutex_lock(&info->mm_lock);
1328 if (fb->fb_mmap) { 1327 if (fb->fb_mmap) {
1329 int res; 1328 int res;
1330 mutex_lock(&info->lock);
1331 res = fb->fb_mmap(info, vma); 1329 res = fb->fb_mmap(info, vma);
1332 mutex_unlock(&info->lock); 1330 mutex_unlock(&info->mm_lock);
1333 return res; 1331 return res;
1334 } 1332 }
1335 1333
1336 mutex_lock(&info->lock);
1337
1338 /* frame buffer memory */ 1334 /* frame buffer memory */
1339 start = info->fix.smem_start; 1335 start = info->fix.smem_start;
1340 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); 1336 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
1342 /* memory mapped io */ 1338 /* memory mapped io */
1343 off -= len; 1339 off -= len;
1344 if (info->var.accel_flags) { 1340 if (info->var.accel_flags) {
1345 mutex_unlock(&info->lock); 1341 mutex_unlock(&info->mm_lock);
1346 return -EINVAL; 1342 return -EINVAL;
1347 } 1343 }
1348 start = info->fix.mmio_start; 1344 start = info->fix.mmio_start;
1349 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); 1345 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
1350 } 1346 }
1351 mutex_unlock(&info->lock); 1347 mutex_unlock(&info->mm_lock);
1352 start &= PAGE_MASK; 1348 start &= PAGE_MASK;
1353 if ((vma->vm_end - vma->vm_start + off) > len) 1349 if ((vma->vm_end - vma->vm_start + off) > len)
1354 return -EINVAL; 1350 return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
1518 break; 1514 break;
1519 fb_info->node = i; 1515 fb_info->node = i;
1520 mutex_init(&fb_info->lock); 1516 mutex_init(&fb_info->lock);
1517 mutex_init(&fb_info->mm_lock);
1521 1518
1522 fb_info->dev = device_create(fb_class, fb_info->device, 1519 fb_info->dev = device_create(fb_class, fb_info->device,
1523 MKDEV(FB_MAJOR, i), NULL, "fb%d", i); 1520 MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
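
The fbmem.c hunks add an fb_info mm_lock, initialized in register_framebuffer(), and make fb_mmap() read smem_start/smem_len under it instead of the heavier info->lock. A sketch of the reader side of that contract, using only the fields visible in the hunks (example_snapshot_aperture() is a hypothetical helper):

#include <linux/fb.h>
#include <linux/mutex.h>

/* Take a consistent snapshot of the framebuffer aperture; any code
 * that maps or sizes the memory reads both fields under mm_lock so it
 * never sees a start from one buffer and a length from another. */
static void example_snapshot_aperture(struct fb_info *info,
				      unsigned long *start, u32 *len)
{
	mutex_lock(&info->mm_lock);
	*start = info->fix.smem_start;
	*len = info->fix.smem_len;
	mutex_unlock(&info->mm_lock);
}
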
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd7..0bf2190928d0 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
750static int map_video_memory(struct fb_info *info) 750static int map_video_memory(struct fb_info *info)
751{ 751{
752 phys_addr_t phys; 752 phys_addr_t phys;
753 u32 smem_len = info->fix.line_length * info->var.yres_virtual;
753 754
754 pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual); 755 pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
755 pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual); 756 pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
756 pr_debug("info->fix.line_length = %d\n", info->fix.line_length); 757 pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
758 pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
757 759
758 info->fix.smem_len = info->fix.line_length * info->var.yres_virtual; 760 info->screen_base = fsl_diu_alloc(smem_len, &phys);
759 pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
760 info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
761 if (info->screen_base == NULL) { 761 if (info->screen_base == NULL) {
762 printk(KERN_ERR "Unable to allocate fb memory\n"); 762 printk(KERN_ERR "Unable to allocate fb memory\n");
763 return -ENOMEM; 763 return -ENOMEM;
764 } 764 }
765 mutex_lock(&info->mm_lock);
765 info->fix.smem_start = (unsigned long) phys; 766 info->fix.smem_start = (unsigned long) phys;
767 info->fix.smem_len = smem_len;
768 mutex_unlock(&info->mm_lock);
766 info->screen_size = info->fix.smem_len; 769 info->screen_size = info->fix.smem_len;
767 770
768 pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n", 771 pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
769 info->fix.smem_start, 772 info->fix.smem_start, info->fix.smem_len);
770 info->fix.smem_len);
771 pr_debug("screen base %p\n", info->screen_base); 773 pr_debug("screen base %p\n", info->screen_base);
772 774
773 return 0; 775 return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
776static void unmap_video_memory(struct fb_info *info) 778static void unmap_video_memory(struct fb_info *info)
777{ 779{
778 fsl_diu_free(info->screen_base, info->fix.smem_len); 780 fsl_diu_free(info->screen_base, info->fix.smem_len);
781 mutex_lock(&info->mm_lock);
779 info->screen_base = NULL; 782 info->screen_base = NULL;
780 info->fix.smem_start = 0; 783 info->fix.smem_start = 0;
781 info->fix.smem_len = 0; 784 info->fix.smem_len = 0;
785 mutex_unlock(&info->mm_lock);
782} 786}
783 787
784/* 788/*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc89..71960672d721 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
1090 memset(fix, 0, sizeof(struct fb_fix_screeninfo)); 1090 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
1091 1091
1092 strcpy(fix->id, "I810"); 1092 strcpy(fix->id, "I810");
1093 mutex_lock(&info->mm_lock);
1093 fix->smem_start = par->fb.physical; 1094 fix->smem_start = par->fb.physical;
1094 fix->smem_len = par->fb.size; 1095 fix->smem_len = par->fb.size;
1096 mutex_unlock(&info->mm_lock);
1095 fix->type = FB_TYPE_PACKED_PIXELS; 1097 fix->type = FB_TYPE_PACKED_PIXELS;
1096 fix->type_aux = 0; 1098 fix->type_aux = 0;
1097 fix->xpanstep = 8; 1099 fix->xpanstep = 8;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50c..59c3a2e14913 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
724 struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix; 724 struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
725 DBG(__func__) 725 DBG(__func__)
726 726
727 mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
727 fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes); 728 fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
728 fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes); 729 fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
730 mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
729} 731}
730 732
731static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 733static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
2081 spin_lock_init(&ACCESS_FBINFO(lock.accel)); 2083 spin_lock_init(&ACCESS_FBINFO(lock.accel));
2082 init_rwsem(&ACCESS_FBINFO(crtc2.lock)); 2084 init_rwsem(&ACCESS_FBINFO(crtc2.lock));
2083 init_rwsem(&ACCESS_FBINFO(altout.lock)); 2085 init_rwsem(&ACCESS_FBINFO(altout.lock));
2086 mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
2084 ACCESS_FBINFO(irq_flags) = 0; 2087 ACCESS_FBINFO(irq_flags) = 0;
2085 init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait)); 2088 init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
2086 init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait)); 2089 init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145d..909e10a11898 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
289#undef m2info 289#undef m2info
290} 290}
291 291
292static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) { 292static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
293{
293 struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; 294 struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
294 295
295 strcpy(fix->id, "MATROX DH"); 296 strcpy(fix->id, "MATROX DH");
296 297
298 mutex_lock(&m2info->fbcon.mm_lock);
297 fix->smem_start = m2info->video.base; 299 fix->smem_start = m2info->video.base;
298 fix->smem_len = m2info->video.len_usable; 300 fix->smem_len = m2info->video.len_usable;
301 mutex_unlock(&m2info->fbcon.mm_lock);
299 fix->ypanstep = 1; 302 fix->ypanstep = 1;
300 fix->ywrapstep = 0; 303 fix->ywrapstep = 0;
301 fix->xpanstep = 8; /* TBD */ 304 fix->xpanstep = 8; /* TBD */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e887..567fb944bd2a 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
669} 669}
670 670
671static int mx3fb_blank(int blank, struct fb_info *fbi); 671static int mx3fb_blank(int blank, struct fb_info *fbi);
672static int mx3fb_map_video_memory(struct fb_info *fbi); 672static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
673static int mx3fb_unmap_video_memory(struct fb_info *fbi); 673static int mx3fb_unmap_video_memory(struct fb_info *fbi);
674 674
675/** 675/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
742 if (fbi->fix.smem_start) 742 if (fbi->fix.smem_start)
743 mx3fb_unmap_video_memory(fbi); 743 mx3fb_unmap_video_memory(fbi);
744 744
745 fbi->fix.smem_len = mem_len; 745 if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
746 if (mx3fb_map_video_memory(fbi) < 0) {
747 mutex_unlock(&mx3_fbi->mutex); 746 mutex_unlock(&mx3_fbi->mutex);
748 return -ENOMEM; 747 return -ENOMEM;
749 } 748 }
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
1198/** 1197/**
1199 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. 1198 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
1200 * @fbi: framebuffer information pointer 1199 * @fbi: framebuffer information pointer
1200 * @mem_len: length of mapped memory
1201 * @return: Error code indicating success or failure 1201 * @return: Error code indicating success or failure
1202 * 1202 *
1203 * This buffer is remapped into a non-cached, non-buffered, memory region to 1203 * This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
1205 * area is remapped, all virtual memory access to the video memory should occur 1205 * area is remapped, all virtual memory access to the video memory should occur
1206 * at the new region. 1206 * at the new region.
1207 */ 1207 */
1208static int mx3fb_map_video_memory(struct fb_info *fbi) 1208static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
1209{ 1209{
1210 int retval = 0; 1210 int retval = 0;
1211 dma_addr_t addr; 1211 dma_addr_t addr;
1212 1212
1213 fbi->screen_base = dma_alloc_writecombine(fbi->device, 1213 fbi->screen_base = dma_alloc_writecombine(fbi->device,
1214 fbi->fix.smem_len, 1214 mem_len,
1215 &addr, GFP_DMA); 1215 &addr, GFP_DMA);
1216 1216
1217 if (!fbi->screen_base) { 1217 if (!fbi->screen_base) {
1218 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", 1218 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
1219 fbi->fix.smem_len); 1219 mem_len);
1220 retval = -EBUSY; 1220 retval = -EBUSY;
1221 goto err0; 1221 goto err0;
1222 } 1222 }
1223 1223
1224 mutex_lock(&fbi->mm_lock);
1224 fbi->fix.smem_start = addr; 1225 fbi->fix.smem_start = addr;
1226 fbi->fix.smem_len = mem_len;
1227 mutex_unlock(&fbi->mm_lock);
1225 1228
1226 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", 1229 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
1227 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); 1230 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
1251 fbi->screen_base, fbi->fix.smem_start); 1254 fbi->screen_base, fbi->fix.smem_start);
1252 1255
1253 fbi->screen_base = 0; 1256 fbi->screen_base = 0;
1257 mutex_lock(&fbi->mm_lock);
1254 fbi->fix.smem_start = 0; 1258 fbi->fix.smem_start = 0;
1255 fbi->fix.smem_len = 0; 1259 fbi->fix.smem_len = 0;
1260 mutex_unlock(&fbi->mm_lock);
1256 return 0; 1261 return 0;
1257} 1262}
1258 1263
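
The mx3fb.c hunks pass the buffer length into mx3fb_map_video_memory() and publish fix.smem_start/smem_len under mm_lock only after the DMA allocation succeeds, so readers never see a length describing memory that was not obtained. A hedged sketch of that allocate-then-publish ordering, mirroring the driver's dma_alloc_writecombine() call but with a hypothetical helper name and no cleanup path:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/gfp.h>
#include <linux/mutex.h>

static int example_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
	dma_addr_t addr;

	fbi->screen_base = dma_alloc_writecombine(fbi->device, mem_len,
						  &addr, GFP_DMA);
	if (!fbi->screen_base)
		return -ENOMEM;	/* fix.smem_* still describe the old buffer */

	/* Publish the new aperture only once it is known to exist. */
	mutex_lock(&fbi->mm_lock);
	fbi->fix.smem_start = addr;
	fbi->fix.smem_len = mem_len;
	mutex_unlock(&fbi->mm_lock);

	return 0;
}
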
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57cb..4ea99bfc37b4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
393 393
394 rg = &plane->fbdev->mem_desc.region[plane->idx]; 394 rg = &plane->fbdev->mem_desc.region[plane->idx];
395 fbi->screen_base = rg->vaddr; 395 fbi->screen_base = rg->vaddr;
396 mutex_lock(&fbi->mm_lock);
396 fix->smem_start = rg->paddr; 397 fix->smem_start = rg->paddr;
397 fix->smem_len = rg->size; 398 fix->smem_len = rg->size;
399 mutex_unlock(&fbi->mm_lock);
398 400
399 fix->type = FB_TYPE_PACKED_PIXELS; 401 fix->type = FB_TYPE_PACKED_PIXELS;
400 bpp = var->bits_per_pixel; 402 bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
886 * plane memory is dealloce'd, the other 888 * plane memory is dealloce'd, the other
887 * screen parameters in var / fix are invalid. 889 * screen parameters in var / fix are invalid.
888 */ 890 */
891 mutex_lock(&fbi->mm_lock);
889 fbi->fix.smem_start = 0; 892 fbi->fix.smem_start = 0;
890 fbi->fix.smem_len = 0; 893 fbi->fix.smem_len = 0;
894 mutex_unlock(&fbi->mm_lock);
891 } 895 }
892 } 896 }
893 } 897 }
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a0..bacfabd9ce16 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
141 offset = 0x10; 141 offset = 0x10;
142 142
143 info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; 143 info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
144 mutex_lock(&info->mm_lock);
144 info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; 145 info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
146 mutex_unlock(&info->mm_lock);
145 info->fix.visual = (pinfo->cmode == CMODE_8) ? 147 info->fix.visual = (pinfo->cmode == CMODE_8) ?
146 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; 148 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
147 info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) 149 info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c3288..6506117c134b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
815 ofb->video_mem_phys = virt_to_phys(ofb->video_mem); 815 ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
816 ofb->video_mem_size = size; 816 ofb->video_mem_size = size;
817 817
818 mutex_lock(&ofb->fb.mm_lock);
818 ofb->fb.fix.smem_start = ofb->video_mem_phys; 819 ofb->fb.fix.smem_start = ofb->video_mem_phys;
819 ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; 820 ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
821 mutex_unlock(&ofb->fb.mm_lock);
820 ofb->fb.screen_base = ofb->video_mem; 822 ofb->fb.screen_base = ofb->video_mem;
821 return 0; 823 return 0;
822} 824}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee3057..9f6d6e61f0cc 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
120 return 0; 120 return 0;
121} 121}
122 122
123static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
124 unsigned long stride)
125{
126 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
127 strcpy(fix->id, "sh7760-lcdc");
128
129 fix->smem_start = (unsigned long)info->screen_base;
130 fix->smem_len = info->screen_size;
131
132 fix->line_length = stride;
133}
134
135static int sh7760fb_get_color_info(struct device *dev, 123static int sh7760fb_get_color_info(struct device *dev,
136 u16 lddfr, int *bpp, int *gray) 124 u16 lddfr, int *bpp, int *gray)
137{ 125{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
334 322
335 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */ 323 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
336 324
337 encode_fix(&info->fix, info, stride); 325 info->fix.line_length = stride;
326
338 sh7760fb_check_var(&info->var, info); 327 sh7760fb_check_var(&info->var, info);
339 328
340 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */ 329 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
435 424
436 info->screen_base = fbmem; 425 info->screen_base = fbmem;
437 info->screen_size = vram; 426 info->screen_size = vram;
427 info->fix.smem_start = (unsigned long)info->screen_base;
428 info->fix.smem_len = info->screen_size;
438 429
439 return 0; 430 return 0;
440} 431}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
520 info->var.transp.length = 0; 511 info->var.transp.length = 0;
521 info->var.transp.msb_right = 0; 512 info->var.transp.msb_right = 0;
522 513
514 strcpy(info->fix.id, "sh7760-lcdc");
515
523 /* set the DON2 bit now, before cmap allocation, as it will randomize 516 /* set the DON2 bit now, before cmap allocation, as it will randomize
524 * palette memory. 517 * palette memory.
525 */ 518 */
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f10d2fbeda06..da983b720f08 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/vmalloc.h>
20#include <video/sh_mobile_lcdc.h> 21#include <video/sh_mobile_lcdc.h>
21#include <asm/atomic.h> 22#include <asm/atomic.h>
22 23
@@ -33,6 +34,7 @@ struct sh_mobile_lcdc_chan {
33 struct fb_info info; 34 struct fb_info info;
34 dma_addr_t dma_handle; 35 dma_addr_t dma_handle;
35 struct fb_deferred_io defio; 36 struct fb_deferred_io defio;
37 struct scatterlist *sglist;
36 unsigned long frame_end; 38 unsigned long frame_end;
37 wait_queue_head_t frame_end_wait; 39 wait_queue_head_t frame_end_wait;
38}; 40};
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {}
206static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} 208static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {}
207#endif 209#endif
208 210
211static int sh_mobile_lcdc_sginit(struct fb_info *info,
212 struct list_head *pagelist)
213{
214 struct sh_mobile_lcdc_chan *ch = info->par;
215 unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
216 struct page *page;
217 int nr_pages = 0;
218
219 sg_init_table(ch->sglist, nr_pages_max);
220
221 list_for_each_entry(page, pagelist, lru)
222 sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
223
224 return nr_pages;
225}
226
209static void sh_mobile_lcdc_deferred_io(struct fb_info *info, 227static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
210 struct list_head *pagelist) 228 struct list_head *pagelist)
211{ 229{
212 struct sh_mobile_lcdc_chan *ch = info->par; 230 struct sh_mobile_lcdc_chan *ch = info->par;
231 unsigned int nr_pages;
213 232
214 /* enable clocks before accessing hardware */ 233 /* enable clocks before accessing hardware */
215 sh_mobile_lcdc_clk_on(ch->lcdc); 234 sh_mobile_lcdc_clk_on(ch->lcdc);
216 235
236 nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
237 dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
238
217 /* trigger panel update */ 239 /* trigger panel update */
218 lcdc_write_chan(ch, LDSM2R, 1); 240 lcdc_write_chan(ch, LDSM2R, 1);
241
242 dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
219} 243}
220 244
221static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) 245static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -846,21 +870,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
846 } 870 }
847 871
848 for (i = 0; i < j; i++) { 872 for (i = 0; i < j; i++) {
849 error = register_framebuffer(&priv->ch[i].info); 873 struct sh_mobile_lcdc_chan *ch = priv->ch + i;
874
875 info = &ch->info;
876
877 if (info->fbdefio) {
878 priv->ch->sglist = vmalloc(sizeof(struct scatterlist) *
879 info->fix.smem_len >> PAGE_SHIFT);
880 if (!priv->ch->sglist) {
881 dev_err(&pdev->dev, "cannot allocate sglist\n");
882 goto err1;
883 }
884 }
885
886 error = register_framebuffer(info);
850 if (error < 0) 887 if (error < 0)
851 goto err1; 888 goto err1;
852 }
853 889
854 for (i = 0; i < j; i++) {
855 info = &priv->ch[i].info;
856 dev_info(info->dev, 890 dev_info(info->dev,
857 "registered %s/%s as %dx%d %dbpp.\n", 891 "registered %s/%s as %dx%d %dbpp.\n",
858 pdev->name, 892 pdev->name,
859 (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ? 893 (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
860 "mainlcd" : "sublcd", 894 "mainlcd" : "sublcd",
861 (int) priv->ch[i].cfg.lcd_cfg.xres, 895 (int) ch->cfg.lcd_cfg.xres,
862 (int) priv->ch[i].cfg.lcd_cfg.yres, 896 (int) ch->cfg.lcd_cfg.yres,
863 priv->ch[i].cfg.bpp); 897 ch->cfg.bpp);
864 898
865 /* deferred io mode: disable clock to save power */ 899 /* deferred io mode: disable clock to save power */
866 if (info->fbdefio) 900 if (info->fbdefio)
@@ -892,6 +926,9 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
892 if (!info->device) 926 if (!info->device)
893 continue; 927 continue;
894 928
929 if (priv->ch[i].sglist)
930 vfree(priv->ch[i].sglist);
931
895 dma_free_coherent(&pdev->dev, info->fix.smem_len, 932 dma_free_coherent(&pdev->dev, info->fix.smem_len,
896 info->screen_base, priv->ch[i].dma_handle); 933 info->screen_base, priv->ch[i].dma_handle);
897 fb_dealloc_cmap(&info->cmap); 934 fb_dealloc_cmap(&info->cmap);
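
The sh_mobile_lcdcfb.c hunks allocate one scatterlist entry per framebuffer page and, on every deferred-io flush, turn the dirty pagelist into an sg list that is DMA-mapped around the panel update. A minimal sketch of that pagelist-to-scatterlist step; the device pointer, table size, and example_flush_dirty_pages() name are assumptions:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_flush_dirty_pages(struct device *dev,
				     struct scatterlist *sglist,
				     unsigned int max_pages,
				     struct list_head *pagelist)
{
	struct page *page;
	int nr_pages = 0;

	/* Reset the preallocated table, then add one entry per dirty
	 * page reported by the deferred-io core (linked via page->lru). */
	sg_init_table(sglist, max_pages);

	list_for_each_entry(page, pagelist, lru)
		sg_set_page(&sglist[nr_pages++], page, PAGE_SIZE, 0);

	if (!nr_pages)
		return 0;

	if (!dma_map_sg(dev, sglist, nr_pages, DMA_TO_DEVICE))
		return -EIO;

	/* ... trigger the hardware transfer here ... */

	dma_unmap_sg(dev, sglist, nr_pages, DMA_TO_DEVICE);
	return nr_pages;
}

Pre-sizing the table for the whole framebuffer (smem_len >> PAGE_SHIFT entries, as the probe hunk does) keeps the flush path free of allocations.
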
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d5..fd33455389b8 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
1847 1847
1848 strcpy(fix->id, ivideo->myid); 1848 strcpy(fix->id, ivideo->myid);
1849 1849
1850 mutex_lock(&info->mm_lock);
1850 fix->smem_start = ivideo->video_base + ivideo->video_offset; 1851 fix->smem_start = ivideo->video_base + ivideo->video_offset;
1851 fix->smem_len = ivideo->sisfb_mem; 1852 fix->smem_len = ivideo->sisfb_mem;
1853 mutex_unlock(&info->mm_lock);
1852 fix->type = FB_TYPE_PACKED_PIXELS; 1854 fix->type = FB_TYPE_PACKED_PIXELS;
1853 fix->type_aux = 0; 1855 fix->type_aux = 0;
1854 fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 1856 fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a06702..98f24f0ec00d 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
145#define SM501_MEMF_ACCEL (8) 145#define SM501_MEMF_ACCEL (8)
146 146
147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, 147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
148 unsigned int why, size_t size) 148 unsigned int why, size_t size, u32 smem_len)
149{ 149{
150 struct sm501fb_par *par; 150 struct sm501fb_par *par;
151 struct fb_info *fbi; 151 struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
172 if (ptr > 0) 172 if (ptr > 0)
173 ptr &= ~(PAGE_SIZE - 1); 173 ptr &= ~(PAGE_SIZE - 1);
174 174
175 if (fbi && ptr < fbi->fix.smem_len) 175 if (fbi && ptr < smem_len)
176 return -ENOMEM; 176 return -ENOMEM;
177 177
178 break; 178 break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
197 197
198 case SM501_MEMF_ACCEL: 198 case SM501_MEMF_ACCEL:
199 fbi = inf->fb[HEAD_CRT]; 199 fbi = inf->fb[HEAD_CRT];
200 ptr = fbi ? fbi->fix.smem_len : 0; 200 ptr = fbi ? smem_len : 0;
201 201
202 fbi = inf->fb[HEAD_PANEL]; 202 fbi = inf->fb[HEAD_PANEL];
203 if (fbi) { 203 if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
413 unsigned int mem_type; 413 unsigned int mem_type;
414 unsigned int clock_type; 414 unsigned int clock_type;
415 unsigned int head_addr; 415 unsigned int head_addr;
416 unsigned int smem_len;
416 417
417 dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", 418 dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
418 __func__, var->xres, var->yres, var->bits_per_pixel, 419 __func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
453 454
454 /* allocate fb memory within 501 */ 455 /* allocate fb memory within 501 */
455 info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; 456 info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
456 info->fix.smem_len = info->fix.line_length * var->yres_virtual; 457 smem_len = info->fix.line_length * var->yres_virtual;
457 458
458 dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, 459 dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
459 info->fix.line_length); 460 info->fix.line_length);
460 461
461 if (sm501_alloc_mem(fbi, &par->screen, mem_type, 462 if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
462 info->fix.smem_len)) {
463 dev_err(fbi->dev, "no memory available\n"); 463 dev_err(fbi->dev, "no memory available\n");
464 return -ENOMEM; 464 return -ENOMEM;
465 } 465 }
466 466
467 mutex_lock(&info->mm_lock);
467 info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; 468 info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
469 info->fix.smem_len = smem_len;
470 mutex_unlock(&info->mm_lock);
468 471
469 info->screen_base = fbi->fbmem + par->screen.sm_addr; 472 info->screen_base = fbi->fbmem + par->screen.sm_addr;
470 info->screen_size = info->fix.smem_len; 473 info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
637 if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { 640 if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
638 /* the head is displaying panel data... */ 641 /* the head is displaying panel data... */
639 642
640 sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0); 643 sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
644 info->fix.smem_len);
641 goto out_update; 645 goto out_update;
642 } 646 }
643 647
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
1289 1293
1290 par->cursor_regs = info->regs + reg_base; 1294 par->cursor_regs = info->regs + reg_base;
1291 1295
1292 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); 1296 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
1297 fbi->fix.smem_len);
1293 if (ret < 0) 1298 if (ret < 0)
1294 return ret; 1299 return ret;
1295 1300
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f10..8a141c2c637b 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
523 info->fix.ywrapstep = 0; 523 info->fix.ywrapstep = 0;
524 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; 524 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
525 525
526 mutex_lock(&info->mm_lock);
526 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { 527 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
527 par->extmem_active = 1; 528 par->extmem_active = 1;
528 info->fix.smem_len = par->mach->mem->size+1; 529 info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
530 par->extmem_active = 0; 531 par->extmem_active = 0;
531 info->fix.smem_len = MEM_INT_SIZE+1; 532 info->fix.smem_len = MEM_INT_SIZE+1;
532 } 533 }
534 mutex_unlock(&info->mm_lock);
533 535
534 w100fb_activate_var(par); 536 w100fb_activate_var(par);
535 } 537 }