author		David S. Miller <davem@davemloft.net>	2012-05-20 21:53:04 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-20 21:53:04 -0400
commit		17eea0df5f7068fc04959e655ef8f0a0ed097e19 (patch)
tree		d44b5cceb813dddfd1e62fe9f92556cf113d62fd /drivers
parent		9b905fe68433378032b851c4d81a59187689fa52 (diff)
parent		76e10d158efb6d4516018846f60c2ab5501900bc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/bus.c	4
-rw-r--r--	drivers/acpi/power.c	9
-rw-r--r--	drivers/acpi/scan.c	4
-rw-r--r--	drivers/block/DAC960.c	23
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.c	4
-rw-r--r--	drivers/char/virtio_console.c	7
-rw-r--r--	drivers/crypto/Kconfig	1
-rw-r--r--	drivers/dma/at_hdmac.c	4
-rw-r--r--	drivers/dma/ep93xx_dma.c	4
-rw-r--r--	drivers/dma/pl330.c	3
-rw-r--r--	drivers/md/dm-thin.c	32
-rw-r--r--	drivers/md/md.c	2
-rw-r--r--	drivers/md/raid10.c	56
-rw-r--r--	drivers/net/ethernet/intel/e1000/e1000_main.c	6
-rw-r--r--	drivers/net/virtio_net.c	2
-rw-r--r--	drivers/pci/pci-acpi.c	2
-rw-r--r--	drivers/rtc/rtc-pl031.c	18
-rw-r--r--	drivers/target/target_core_file.c	22
-rw-r--r--	drivers/target/target_core_pr.c	3
-rw-r--r--	drivers/virtio/virtio_balloon.c	1
20 files changed, 140 insertions, 67 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3263b68cdfa3..3188da3df8da 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -250,6 +250,10 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
 		return -ENODEV;
 	}

+	/* For D3cold we should execute _PS3, not _PS4. */
+	if (state == ACPI_STATE_D3_COLD)
+		object_name[3] = '3';
+
 	/*
 	 * Transition Power
 	 * ----------------
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 330bb4d75852..0500f719f63e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -660,7 +660,7 @@ int acpi_power_on_resources(struct acpi_device *device, int state)

 int acpi_power_transition(struct acpi_device *device, int state)
 {
-	int result;
+	int result = 0;

 	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
 		return -EINVAL;
@@ -679,8 +679,11 @@ int acpi_power_transition(struct acpi_device *device, int state)
 	 * (e.g. so the device doesn't lose power while transitioning). Then,
 	 * we dereference all power resources used in the current list.
 	 */
-	result = acpi_power_on_list(&device->power.states[state].resources);
-	if (!result)
+	if (state < ACPI_STATE_D3_COLD)
+		result = acpi_power_on_list(
+			&device->power.states[state].resources);
+
+	if (!result && device->power.state < ACPI_STATE_D3_COLD)
 		acpi_power_off_list(
 			&device->power.states[device->power.state].resources);

diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7417267e88fa..85cbfdccc97c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -908,6 +908,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
 	device->power.states[ACPI_STATE_D3].flags.valid = 1;
 	device->power.states[ACPI_STATE_D3].power = 0;

+	/* Set D3cold's explicit_set flag if _PS3 exists. */
+	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
+		device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
+
 	acpi_bus_init_power(device);

 	return 0;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 8db9089127c5..9a13e889837e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6580,24 +6580,21 @@ static const struct file_operations dac960_user_command_proc_fops = {

 static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
 {
-	struct proc_dir_entry *StatusProcEntry;
 	struct proc_dir_entry *ControllerProcEntry;
-	struct proc_dir_entry *UserCommandProcEntry;

 	if (DAC960_ProcDirectoryEntry == NULL) {
 		DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
-		StatusProcEntry = proc_create("status", 0,
-					DAC960_ProcDirectoryEntry,
-					&dac960_proc_fops);
+		proc_create("status", 0, DAC960_ProcDirectoryEntry,
+			    &dac960_proc_fops);
 	}

 	sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
 	ControllerProcEntry = proc_mkdir(Controller->ControllerName,
 					 DAC960_ProcDirectoryEntry);
 	proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
 	proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
-	UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
+	proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
 	Controller->ControllerProcEntry = ControllerProcEntry;
 }


diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 00f9fc992090..304000c3d433 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2510,8 +2510,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 		up(&dd->port->cmd_slot);
 		return NULL;
 	}
-	if (unlikely(*tag < 0))
+	if (unlikely(*tag < 0)) {
+		up(&dd->port->cmd_slot);
 		return NULL;
+	}

 	return dd->port->commands[*tag].sg;
 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ddf86b6500b7..cdf2f5451c76 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1895,6 +1895,13 @@ static int virtcons_restore(struct virtio_device *vdev)

 		/* Get port open/close status on the host */
 		send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+		/*
+		 * If a port was open at the time of suspending, we
+		 * have to let the host know that it's still open.
+		 */
+		if (port->guest_connected)
+			send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
 	}
 	return 0;
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ab9abb46d01a..dd414d9350ef 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER2
+	select CRYPTO_HASH
 	help
 	  This driver allows you to utilize the Cryptographic Engines and
 	  Security Accelerator (CESA) which can be found on the Marvell Orion
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 445fdf811695..bf0d7e4e345b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -245,7 +245,9 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);

-	dma_cookie_complete(txd);
+	/* mark the descriptor as complete for non cyclic cases only */
+	if (!atc_chan_is_cyclic(atchan))
+		dma_cookie_complete(txd);

 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index e6f133b78dc2..f6e9b572b998 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -703,7 +703,9 @@ static void ep93xx_dma_tasklet(unsigned long data)
 	desc = ep93xx_dma_get_active(edmac);
 	if (desc) {
 		if (desc->complete) {
-			dma_cookie_complete(&desc->txd);
+			/* mark descriptor complete for non cyclic case only */
+			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+				dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
 		callback = desc->txd.callback;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2ee6e23930ad..fa3fb21e60be 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2322,7 +2322,8 @@ static void pl330_tasklet(unsigned long data)
 	/* Pick up ripe tomatoes */
 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
 		if (desc->status == DONE) {
-			dma_cookie_complete(&desc->txd);
+			if (!pch->cyclic)
+				dma_cookie_complete(&desc->txd);
 			list_move_tail(&desc->node, &list);
 		}

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2fd87b544a93..eb3d138ff55a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1632,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	pool->low_water_blocks = pt->low_water_blocks;
 	pool->pf = pt->pf;

+	/*
+	 * If discard_passdown was enabled verify that the data device
+	 * supports discards.  Disable discard_passdown if not; otherwise
+	 * -EOPNOTSUPP will be returned.
+	 */
+	if (pt->pf.discard_passdown) {
+		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+		if (!q || !blk_queue_discard(q)) {
+			char buf[BDEVNAME_SIZE];
+			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
+			       bdevname(pt->data_dev->bdev, buf));
+			pool->pf.discard_passdown = 0;
+		}
+	}
+
 	return 0;
 }

@@ -1988,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out_flags_changed;
 	}

-	/*
-	 * If discard_passdown was enabled verify that the data device
-	 * supports discards.  Disable discard_passdown if not; otherwise
-	 * -EOPNOTSUPP will be returned.
-	 */
-	if (pf.discard_passdown) {
-		struct request_queue *q = bdev_get_queue(data_dev->bdev);
-		if (!q || !blk_queue_discard(q)) {
-			DMWARN("Discard unsupported by data device: Disabling discard passdown.");
-			pf.discard_passdown = 0;
-		}
-	}
-
 	pt->pool = pool;
 	pt->ti = ti;
 	pt->metadata_dev = metadata_dev;
@@ -2385,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		       (unsigned long long)pt->low_water_blocks);

 		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
-			!pool->pf.discard_passdown;
+			!pt->pf.discard_passdown;
 		DMEMIT("%u ", count);

 		if (!pool->pf.zero_new_blocks)
@@ -2394,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		if (!pool->pf.discard_enabled)
 			DMEMIT("ignore_discard ");

-		if (!pool->pf.discard_passdown)
+		if (!pt->pf.discard_passdown)
 			DMEMIT("no_discard_passdown ");

 		break;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 477eb2e180c0..01233d855eb2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
 	synchronize_rcu();
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
+
+	del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c8dbb84d5357..3f91c2e1dfe7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return size << conf->chunk_shift;
 }

+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+	/* Calculate the number of sectors-per-device that will
+	 * actually be used, and set conf->dev_sectors and
+	 * conf->stride
+	 */
+
+	size = size >> conf->chunk_shift;
+	sector_div(size, conf->far_copies);
+	size = size * conf->raid_disks;
+	sector_div(size, conf->near_copies);
+	/* 'size' is now the number of chunks in the array */
+	/* calculate "used chunks per device" */
+	size = size * conf->copies;
+
+	/* We need to round up when dividing by raid_disks to
+	 * get the stride size.
+	 */
+	size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+	conf->dev_sectors = size << conf->chunk_shift;
+
+	if (conf->far_offset)
+		conf->stride = 1 << conf->chunk_shift;
+	else {
+		sector_div(size, conf->far_copies);
+		conf->stride = size << conf->chunk_shift;
+	}
+}

 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
 	int nc, fc, fo;
-	sector_t stride, size;
 	int err = -EINVAL;

 	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	if (!conf->r10bio_pool)
 		goto out;

-	size = mddev->dev_sectors >> conf->chunk_shift;
-	sector_div(size, fc);
-	size = size * conf->raid_disks;
-	sector_div(size, nc);
-	/* 'size' is now the number of chunks in the array */
-	/* calculate "used chunks per device" in 'stride' */
-	stride = size * conf->copies;
-
-	/* We need to round up when dividing by raid_disks to
-	 * get the stride size.
-	 */
-	stride += conf->raid_disks - 1;
-	sector_div(stride, conf->raid_disks);
-
-	conf->dev_sectors = stride << conf->chunk_shift;
-
-	if (fo)
-		stride = 1;
-	else
-		sector_div(stride, fc);
-	conf->stride = stride << conf->chunk_shift;
-
+	calc_sectors(conf, mddev->dev_sectors);

 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
@@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
 		mddev->recovery_cp = oldsize;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->dev_sectors = sectors;
+	calc_sectors(conf, sectors);
+	mddev->dev_sectors = conf->dev_sectors;
 	mddev->resync_max_sectors = size;
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 37b7d1c90723..95731c841044 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -493,7 +493,11 @@ out:
 static void e1000_down_and_stop(struct e1000_adapter *adapter)
 {
 	set_bit(__E1000_DOWN, &adapter->flags);
-	cancel_work_sync(&adapter->reset_task);
+
+	/* Only kill reset task if adapter is not resetting */
+	if (!test_bit(__E1000_RESETTING, &adapter->flags))
+		cancel_work_sync(&adapter->reset_task);
+
 	cancel_delayed_work_sync(&adapter->watchdog_task);
 	cancel_delayed_work_sync(&adapter->phy_info_task);
 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fa58c7869954..9ce6995e8d08 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -501,7 +501,9 @@ static void virtnet_napi_enable(struct virtnet_info *vi)
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (napi_schedule_prep(&vi->napi)) {
 		virtqueue_disable_cb(vi->rvq);
+		local_bh_disable();
 		__napi_schedule(&vi->napi);
+		local_bh_enable();
 	}
 }

diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1929c0c63b75..61e2fefeedab 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		[PCI_D0] = ACPI_STATE_D0,
 		[PCI_D1] = ACPI_STATE_D1,
 		[PCI_D2] = ACPI_STATE_D2,
-		[PCI_D3hot] = ACPI_STATE_D3_HOT,
+		[PCI_D3hot] = ACPI_STATE_D3,
 		[PCI_D3cold] = ACPI_STATE_D3
 	};
 	int error = -EINVAL;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 684ef4bbfce4..f027c063fb20 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 	int ret;
 	struct pl031_local *ldata;
 	struct rtc_class_ops *ops = id->data;
+	unsigned long time;

 	ret = amba_request_regions(adev, NULL);
 	if (ret)
@@ -343,6 +344,23 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
 		       ldata->base + RTC_CR);

+	/*
+	 * On ST PL031 variants, the RTC reset value does not provide correct
+	 * weekday for 2000-01-01. Correct the erroneous sunday to saturday.
+	 */
+	if (ldata->hw_designer == AMBA_VENDOR_ST) {
+		if (readl(ldata->base + RTC_YDR) == 0x2000) {
+			time = readl(ldata->base + RTC_DR);
+			if ((time &
+			     (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
+			    == 0x02120000) {
+				time = time | (0x7 << RTC_WDAY_SHIFT);
+				writel(0x2000, ldata->base + RTC_YLR);
+				writel(time, ldata->base + RTC_LR);
+			}
+		}
+	}
+
 	ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
 					THIS_MODULE);
 	if (IS_ERR(ldata->rtc)) {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7ed58e2df791..f286955331a2 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -169,6 +169,7 @@ static struct se_device *fd_create_virtdevice(
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		struct request_queue *q;
+		unsigned long long dev_size;
 		/*
 		 * Setup the local scope queue_limits from struct request_queue->limits
 		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@@ -183,13 +184,12 @@ static struct se_device *fd_create_virtdevice(
 		 * one (1) logical sector from underlying struct block_device
 		 */
 		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
-		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
-				       fd_dev->fd_block_size);
+		dev_size = (i_size_read(file->f_mapping->host) -
+				fd_dev->fd_block_size);

 		pr_debug("FILEIO: Using size: %llu bytes from struct"
 			" block_device blocks: %llu logical_block_size: %d\n",
-			fd_dev->fd_dev_size,
-			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 	} else {
 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -605,10 +605,20 @@ static u32 fd_get_device_type(struct se_device *dev)
 static sector_t fd_get_blocks(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
-			dev->se_sub_dev->se_dev_attrib.block_size);
+	struct file *f = fd_dev->fd_file;
+	struct inode *i = f->f_mapping->host;
+	unsigned long long dev_size;
+	/*
+	 * When using a file that references an underlying struct block_device,
+	 * ensure dev_size is always based on the current inode size in order
+	 * to handle underlying block_device resize operations.
+	 */
+	if (S_ISBLK(i->i_mode))
+		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+	else
+		dev_size = fd_dev->fd_dev_size;

-	return blocks_long;
+	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
 }

 static struct se_subsystem_api fileio_template = {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 86f0c3b5d500..c3148b10b4b3 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -220,6 +220,9 @@ int target_scsi2_reservation_release(struct se_task *task)
 	if (dev->dev_reserved_node_acl != sess->se_node_acl)
 		goto out_unlock;

+	if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+		goto out_unlock;
+
 	dev->dev_reserved_node_acl = NULL;
 	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
 	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c2d05a8279fd..8807fe501d20 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -390,6 +390,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
 	/* There might be pages left in the balloon: free them. */
 	while (vb->num_pages)
 		leak_balloon(vb, vb->num_pages);
+	update_balloon_size(vb);

 	/* Now we reset the device so we can clean up the queues. */
 	vdev->config->reset(vdev);