 arch/arm/mach-orion5x/common.c          |   7
 arch/arm/mm/dma-mapping.c               |   2
 drivers/edac/i3200_edac.c               |   2
 drivers/edac/i5000_edac.c               |   4
 drivers/edac/sb_edac.c                  |   7
 drivers/gpio/gpio-lpc32xx.c             |   5
 drivers/gpu/drm/nouveau/nouveau_abi16.c |   2
 drivers/gpu/drm/nouveau/nvc0_fb.c       |   1
 drivers/gpu/drm/nouveau/nvc0_fifo.c     |   3
 drivers/gpu/drm/nouveau/nve0_fifo.c     |   3
 drivers/gpu/drm/udl/udl_connector.c     |   7
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |   2
 drivers/md/dm-mpath.c                   |  11
 drivers/md/dm-table.c                   |  61
 drivers/md/dm-thin.c                    | 135
 drivers/md/dm-verity.c                  |   8
 drivers/md/dm.c                         |  71
 drivers/md/dm.h                         |   1
 drivers/md/raid10.c                     |   8
 drivers/md/raid5.c                      |   1
 drivers/usb/core/devices.c              |   2
 drivers/usb/core/hcd.c                  |   6
 drivers/usb/host/ohci-at91.c            |   3
 fs/dcache.c                             |   2
 mm/huge_memory.c                        |   1
 sound/soc/codecs/wm2000.c               |   2
 sound/usb/endpoint.c                    |   8
 27 files changed, 266 insertions(+), 99 deletions(-)
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 410291c67666..a6cd14ab1e4e 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -204,6 +204,13 @@ void __init orion5x_wdt_init(void)
 void __init orion5x_init_early(void)
 {
         orion_time_set_base(TIMER_VIRT_BASE);
+
+        /*
+         * Some Orion5x devices allocate their coherent buffers from atomic
+         * context. Increase size of atomic coherent pool to make sure such
+         * the allocations won't fail.
+         */
+        init_dma_coherent_pool_size(SZ_1M);
 }
 
 int orion5x_tclk;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e59c4ab71bcb..13f555d62491 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -346,6 +346,8 @@ static int __init atomic_pool_init(void)
                        (unsigned)pool->size / 1024);
                 return 0;
         }
+
+        kfree(pages);
 no_pages:
         kfree(bitmap);
 no_bitmap:
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 47180a08edad..b6653a6fc5d5 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
                 for (j = 0; j < nr_channels; j++) {
                         struct dimm_info *dimm = csrow->channels[j]->dimm;
 
-                        dimm->nr_pages = nr_pages / nr_channels;
+                        dimm->nr_pages = nr_pages;
                         dimm->grain = nr_pages << PAGE_SHIFT;
                         dimm->mtype = MEM_DDR2;
                         dimm->dtype = DEV_UNKNOWN;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 39c63757c2a1..6a49dd00b81b 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
         /* add the number of COLUMN bits */
         addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
 
+        /* Dual-rank memories have twice the size */
+        if (dinfo->dual_rank)
+                addrBits++;
+
         addrBits += 6;  /* add 64 bits per DIMM */
         addrBits -= 20; /* divide by 2^^20 */
         addrBits -= 3;  /* 8 bits per bytes */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f3b1f9fafa4b..5715b7c2c517 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 {
         struct sbridge_pvt *pvt = mci->pvt_info;
         struct dimm_info *dimm;
-        int i, j, banks, ranks, rows, cols, size, npages;
+        unsigned i, j, banks, ranks, rows, cols, npages;
+        u64 size;
         u32 reg;
         enum edac_type mode;
         enum mem_type mtype;
@@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                         cols = numcol(mtr);
 
                         /* DDR3 has 8 I/O banks */
-                        size = (rows * cols * banks * ranks) >> (20 - 3);
+                        size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
                         npages = MiB_TO_PAGES(size);
 
-                        edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+                        edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
                                  pvt->sbridge_dev->mc, i, j,
                                  size, npages,
                                  banks, ranks, rows, cols);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 8a420f13905e..ed94b4ea72e9 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
 {
         struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+        __set_gpio_level_p012(group, pin, value);
         __set_gpio_dir_p012(group, pin, 0);
 
         return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 {
         struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+        __set_gpio_level_p3(group, pin, value);
         __set_gpio_dir_p3(group, pin, 0);
 
         return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
         int value)
 {
+        struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+        __set_gpo_level_p3(group, pin, value);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ff23d88880e5..3ca240b4413d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
                 return 0;
         } else
         if (init->class == 0x906e) {
-                NV_ERROR(dev, "906e not supported yet\n");
+                NV_DEBUG(dev, "906e not supported yet\n");
                 return -EINVAL;
         }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index f704e942372e..f376c39310df 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev)
         priv = dev_priv->engine.fb.priv;
 
         nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+        nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
         return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 7d85553d518c..cd39eb99f5b1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nvc0_fifo_isr(struct drm_device *dev)
 {
-        u32 stat = nv_rd32(dev, 0x002100);
+        u32 mask = nv_rd32(dev, 0x002140);
+        u32 stat = nv_rd32(dev, 0x002100) & mask;
 
         if (stat & 0x00000100) {
                 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index e98d144e6eb9..281bece751b6 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nve0_fifo_isr(struct drm_device *dev)
 {
-        u32 stat = nv_rd32(dev, 0x002100);
+        u32 mask = nv_rd32(dev, 0x002140);
+        u32 stat = nv_rd32(dev, 0x002100) & mask;
 
         if (stat & 0x00000100) {
                 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9ca007..8d9dc44f1f94 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
 static int udl_mode_valid(struct drm_connector *connector,
                           struct drm_display_mode *mode)
 {
+        struct udl_device *udl = connector->dev->dev_private;
+        if (!udl->sku_pixel_limit)
+                return 0;
+
+        if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+                return MODE_VIRTUAL_Y;
+
         return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f15e2f1..7e0743358dff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
         }
 
 
-        event = kzalloc(sizeof(event->event), GFP_KERNEL);
+        event = kzalloc(sizeof(*event), GFP_KERNEL);
         if (unlikely(event == NULL)) {
                 DRM_ERROR("Failed to allocate an event.\n");
                 ret = -ENOMEM;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d8abb90a6c2f..034233eefc82 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                            unsigned long arg)
 {
         struct multipath *m = ti->private;
+        struct pgpath *pgpath;
         struct block_device *bdev;
         fmode_t mode;
         unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
         if (!m->current_pgpath)
                 __choose_pgpath(m, 0);
 
-        if (m->current_pgpath) {
-                bdev = m->current_pgpath->path.dev->bdev;
-                mode = m->current_pgpath->path.dev->mode;
+        pgpath = m->current_pgpath;
+
+        if (pgpath) {
+                bdev = pgpath->path.dev->bdev;
+                mode = pgpath->path.dev->mode;
         }
 
-        if (m->queue_io)
+        if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
                 r = -EAGAIN;
         else if (!bdev)
                 r = -EIO;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aae..100368eb7991 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
         return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+                        sector_t start, sector_t len, void *data)
+{
+        unsigned *num_devices = data;
+
+        (*num_devices)++;
+
+        return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+        struct dm_target *uninitialized_var(ti);
+        unsigned i = 0, num_devices = 0;
+
+        while (i < dm_table_get_num_targets(table)) {
+                ti = dm_table_get_target(table, i++);
+
+                if (!ti->type->iterate_devices)
+                        return false;
+
+                ti->type->iterate_devices(ti, count_device, &num_devices);
+                if (num_devices)
+                        return false;
+        }
+
+        return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
         return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+                                sector_t start, sector_t len, void *data)
+{
+        struct request_queue *q = bdev_get_queue(dev->bdev);
+
+        return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+                                           iterate_devices_callout_fn func)
 {
         struct dm_target *ti;
         unsigned i = 0;
 
-        /* Ensure that all underlying device are non-rotational. */
         while (i < dm_table_get_num_targets(t)) {
                 ti = dm_table_get_target(t, i++);
 
                 if (!ti->type->iterate_devices ||
-                    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+                    !ti->type->iterate_devices(ti, func, NULL))
                         return 0;
         }
 
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         if (!dm_table_discard_zeroes_data(t))
                 q->limits.discard_zeroes_data = 0;
 
-        if (dm_table_is_nonrot(t))
+        /* Ensure that all underlying devices are non-rotational. */
+        if (dm_table_all_devices_attribute(t, device_is_nonrot))
                 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
         else
                 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         dm_table_set_integrity(t);
 
         /*
+         * Determine whether or not this queue's I/O timings contribute
+         * to the entropy pool, Only request-based targets use this.
+         * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+         * have it set.
+         */
+        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+        /*
          * QUEUE_FLAG_STACKABLE must be set after all queue settings are
          * visible to other CPUs because, once the flag is set, incoming bios
          * are processed by request-based dm, which refers to the queue
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af1fc3b2c2ad..c29410af1e22 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,9 +509,9 @@ enum pool_mode {
 struct pool_features {
         enum pool_mode mode;
 
-        unsigned zero_new_blocks:1;
-        unsigned discard_enabled:1;
-        unsigned discard_passdown:1;
+        bool zero_new_blocks:1;
+        bool discard_enabled:1;
+        bool discard_passdown:1;
 };
 
 struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
         struct dm_target_callbacks callbacks;
 
         dm_block_t low_water_blocks;
-        struct pool_features pf;
+        struct pool_features requested_pf; /* Features requested during table load */
+        struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 };
 
 /*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+        struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+        return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the data device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+        struct pool *pool = pt->pool;
+        struct block_device *data_bdev = pt->data_dev->bdev;
+        struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+        sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+        const char *reason = NULL;
+        char buf[BDEVNAME_SIZE];
+
+        if (!pt->adjusted_pf.discard_passdown)
+                return;
+
+        if (!data_dev_supports_discard(pt))
+                reason = "discard unsupported";
+
+        else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+                reason = "max discard sectors smaller than a block";
+
+        else if (data_limits->discard_granularity > block_size)
+                reason = "discard granularity larger than a block";
+
+        else if (block_size & (data_limits->discard_granularity - 1))
+                reason = "discard granularity not a factor of block size";
+
+        if (reason) {
+                DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+                pt->adjusted_pf.discard_passdown = false;
+        }
+}
+
 static int bind_control_target(struct pool *pool, struct dm_target *ti)
 {
         struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
          * We want to make sure that degraded pools are never upgraded.
          */
         enum pool_mode old_mode = pool->pf.mode;
-        enum pool_mode new_mode = pt->pf.mode;
+        enum pool_mode new_mode = pt->adjusted_pf.mode;
 
         if (old_mode > new_mode)
                 new_mode = old_mode;
 
         pool->ti = ti;
         pool->low_water_blocks = pt->low_water_blocks;
-        pool->pf = pt->pf;
-        set_pool_mode(pool, new_mode);
+        pool->pf = pt->adjusted_pf;
 
-        /*
-         * If discard_passdown was enabled verify that the data device
-         * supports discards.  Disable discard_passdown if not; otherwise
-         * -EOPNOTSUPP will be returned.
-         */
-        /* FIXME: pull this out into a sep fn. */
-        if (pt->pf.discard_passdown) {
-                struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-                if (!q || !blk_queue_discard(q)) {
-                        char buf[BDEVNAME_SIZE];
-                        DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
-                               bdevname(pt->data_dev->bdev, buf));
-                        pool->pf.discard_passdown = 0;
-                }
-        }
+        set_pool_mode(pool, new_mode);
 
         return 0;
 }
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 static void pool_features_init(struct pool_features *pf)
 {
         pf->mode = PM_WRITE;
-        pf->zero_new_blocks = 1;
-        pf->discard_enabled = 1;
-        pf->discard_passdown = 1;
+        pf->zero_new_blocks = true;
+        pf->discard_enabled = true;
+        pf->discard_passdown = true;
 }
 
 static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
                 argc--;
 
                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
-                        pf->zero_new_blocks = 0;
+                        pf->zero_new_blocks = false;
 
                 else if (!strcasecmp(arg_name, "ignore_discard"))
-                        pf->discard_enabled = 0;
+                        pf->discard_enabled = false;
 
                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
-                        pf->discard_passdown = 0;
+                        pf->discard_passdown = false;
 
                 else if (!strcasecmp(arg_name, "read_only"))
                         pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         pt->metadata_dev = metadata_dev;
         pt->data_dev = data_dev;
         pt->low_water_blocks = low_water_blocks;
-        pt->pf = pf;
+        pt->adjusted_pf = pt->requested_pf = pf;
         ti->num_flush_requests = 1;
+
         /*
          * Only need to enable discards if the pool should pass
          * them down to the data device.  The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
          */
         if (pf.discard_enabled && pf.discard_passdown) {
                 ti->num_discard_requests = 1;
+
                 /*
                  * Setting 'discards_supported' circumvents the normal
                  * stacking of discard limits (this keeps the pool and
                  * thin devices' discard limits consistent).
                  */
                 ti->discards_supported = true;
+                ti->discard_zeroes_data_unsupported = true;
         }
         ti->private = pt;
 
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
                        (unsigned long)pool->sectors_per_block,
                        (unsigned long long)pt->low_water_blocks);
-                emit_flags(&pt->pf, result, sz, maxlen);
+                emit_flags(&pt->requested_pf, result, sz, maxlen);
                 break;
         }
 
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
-        /*
-         * FIXME: these limits may be incompatible with the pool's data device
-         */
+        struct pool *pool = pt->pool;
+        struct queue_limits *data_limits;
+
         limits->max_discard_sectors = pool->sectors_per_block;
 
         /*
-         * This is just a hint, and not enforced.  We have to cope with
-         * bios that cover a block partially.  A discard that spans a block
-         * boundary is not sent to this target.
+         * discard_granularity is just a hint, and not enforced.
          */
-        limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-        limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+        if (pt->adjusted_pf.discard_passdown) {
+                data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+                limits->discard_granularity = data_limits->discard_granularity;
+        } else
+                limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
         blk_limits_io_min(limits, 0);
         blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-        if (pool->pf.discard_enabled)
-                set_discard_limits(pool, limits);
+
+        /*
+         * pt->adjusted_pf is a staging area for the actual features to use.
+         * They get transferred to the live pool in bind_control_target()
+         * called from pool_preresume().
+         */
+        if (!pt->adjusted_pf.discard_enabled)
+                return;
+
+        disable_passdown_if_not_supported(pt);
+
+        set_discard_limits(pt, limits);
 }
 
 static struct target_type pool_target = {
         .name = "thin-pool",
         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                     DM_TARGET_IMMUTABLE,
-        .version = {1, 3, 0},
+        .version = {1, 4, 0},
         .module = THIS_MODULE,
         .ctr = pool_ctr,
         .dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
         return 0;
 }
 
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
         struct thin_c *tc = ti->private;
-        struct pool *pool = tc->pool;
 
-        blk_limits_io_min(limits, 0);
-        blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-        set_discard_limits(pool, limits);
+        *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
 }
 
 static struct target_type thin_target = {
         .name = "thin",
-        .version = {1, 3, 0},
+        .version = {1, 4, 0},
         .module = THIS_MODULE,
         .ctr = thin_ctr,
         .dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 254d19268ad2..892ae2766aa6 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
         v->hash_dev_block_bits = ffs(num) - 1;
 
         if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
-            num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
-            (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+            (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+            >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                 ti->error = "Invalid data blocks";
                 r = -EINVAL;
                 goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
         }
 
         if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
-            num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
-            (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+            (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+            >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                 ti->error = "Invalid hash start";
                 r = -EINVAL;
                 goto bad;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..67ffa391edcf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
         int r = error;
         struct dm_rq_target_io *tio = clone->end_io_data;
-        dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+        dm_request_endio_fn rq_end_io = NULL;
 
-        if (mapped && rq_end_io)
-                r = rq_end_io(tio->ti, clone, error, &tio->info);
+        if (tio->ti) {
+                rq_end_io = tio->ti->type->rq_end_io;
+
+                if (mapped && rq_end_io)
+                        r = rq_end_io(tio->ti, clone, error, &tio->info);
+        }
 
         if (r <= 0)
                 /* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
         int r, requeued = 0;
         struct dm_rq_target_io *tio = clone->end_io_data;
 
-        /*
-         * Hold the md reference here for the in-flight I/O.
-         * We can't rely on the reference count by device opener,
-         * because the device may be closed during the request completion
-         * when all bios are completed.
-         * See the comment in rq_completed() too.
-         */
-        dm_get(md);
-
         tio->ti = ti;
         r = ti->type->map_rq(ti, clone, &tio->info);
         switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
         return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+        struct request *clone;
+
+        blk_start_request(orig);
+        clone = orig->special;
+        atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+        /*
+         * Hold the md reference here for the in-flight I/O.
+         * We can't rely on the reference count by device opener,
+         * because the device may be closed during the request completion
+         * when all bios are completed.
+         * See the comment in rq_completed() too.
+         */
+        dm_get(md);
+
+        return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
                 pos = blk_rq_pos(rq);
 
                 ti = dm_table_find_target(map, pos);
-                BUG_ON(!dm_target_is_valid(ti));
+                if (!dm_target_is_valid(ti)) {
+                        /*
+                         * Must perform setup, that dm_done() requires,
+                         * before calling dm_kill_unmapped_request
+                         */
+                        DMERR_LIMIT("request attempted access beyond the end of device");
+                        clone = dm_start_request(md, rq);
+                        dm_kill_unmapped_request(clone, -EIO);
+                        continue;
+                }
 
                 if (ti->type->busy && ti->type->busy(ti))
                         goto delay_and_out;
 
-                blk_start_request(rq);
-                clone = rq->special;
-                atomic_inc(&md->pending[rq_data_dir(clone)]);
+                clone = dm_start_request(md, rq);
 
                 spin_unlock(q->queue_lock);
                 if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@ delay_and_out:
         blk_delay_queue(q, HZ / 10);
 out:
         dm_table_put(map);
-
-        return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-        struct dm_table *map = ERR_PTR(-EINVAL);
+        struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
         struct queue_limits limits;
         int r;
 
@@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
         if (!dm_suspended_md(md))
                 goto out;
 
+        /*
+         * If the new table has no data devices, retain the existing limits.
+         * This helps multipath with queue_if_no_path if all paths disappear,
+         * then new I/O is queued based on these limits, and then some paths
+         * reappear.
+         */
+        if (dm_table_has_no_data_devices(table)) {
+                live_map = dm_get_live_table(md);
+                if (live_map)
+                        limits = md->queue->limits;
+                dm_table_put(live_map);
+        }
+
         r = dm_calculate_queue_limits(table, &limits);
         if (r) {
                 map = ERR_PTR(r);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 52eef493d266..6a99fefaa743 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
                              void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+bool dm_table_has_no_data_devices(struct dm_table *table);
 int dm_calculate_queue_limits(struct dm_table *table,
                               struct queue_limits *limits);
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c2eb38f3c51..0138a727c1f3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
         do {
                 int n = conf->copies;
                 int cnt = 0;
+                int this = first;
                 while (n--) {
-                        if (conf->mirrors[first].rdev &&
-                            first != ignore)
+                        if (conf->mirrors[this].rdev &&
+                            this != ignore)
                                 cnt++;
-                        first = (first+1) % geo->raid_disks;
+                        this = (this+1) % geo->raid_disks;
                 }
                 if (cnt == 0)
                         return 0;
+                first = (first + geo->near_copies) % geo->raid_disks;
         } while (first != 0);
         return 1;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7031b865b3a0..0689173fd9f5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 #ifdef CONFIG_MULTICORE_RAID456
                 init_waitqueue_head(&nsh->ops.wait_for_ops);
 #endif
+                spin_lock_init(&nsh->stripe_lock);
 
                 list_add(&nsh->lru, &newstripes);
         }
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d95696584762..3440812b4a84 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
         /* print devices for all busses */
         list_for_each_entry(bus, &usb_bus_list, bus_list) {
                 /* recurse through all children of the root hub */
-                if (!bus->root_hub)
+                if (!bus_to_hcd(bus)->rh_registered)
                         continue;
                 usb_lock_device(bus->root_hub);
                 ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index bc84106ac057..75ba2091f9b4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
         if (retval) {
                 dev_err (parent_dev, "can't register root hub for %s, %d\n",
                                 dev_name(&usb_dev->dev), retval);
-        }
-        mutex_unlock(&usb_bus_list_lock);
-
-        if (retval == 0) {
+        } else {
                 spin_lock_irq (&hcd_root_hub_lock);
                 hcd->rh_registered = 1;
                 spin_unlock_irq (&hcd_root_hub_lock);
@@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
                 if (HCD_DEAD(hcd))
                         usb_hc_died (hcd);      /* This time clean up */
         }
+        mutex_unlock(&usb_bus_list_lock);
 
         return retval;
 }
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index aaa8d2bce217..0bf72f943b00 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
         /* From the GPIO notifying the over-current situation, find
          * out the corresponding port */
         at91_for_each_port(port) {
-                if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+                if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+                                gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
                         gpio = pdata->overcurrent_pin[port];
                         break;
                 }
diff --git a/fs/dcache.c b/fs/dcache.c
index 16521a9f2038..0364af2311f4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1141,7 +1141,7 @@ rename_retry:
 EXPORT_SYMBOL(have_submounts);
 
 /*
- * Search the dentry child list for the specified parent,
+ * Search the dentry child list of the specified parent,
  * and move any unused dentries to the end of the unused
  * list for prune_dcache(). We descend to the next level
  * whenever the d_subdirs list is non-empty and continue
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 57c4b9309015..141dbb695097 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1811,7 +1811,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                         src_page = pte_page(pteval);
                         copy_user_highpage(page, src_page, address, vma);
                         VM_BUG_ON(page_mapcount(src_page) != 1);
-                        VM_BUG_ON(page_count(src_page) != 2);
                         release_pte_page(src_page);
                         /*
                          * ptl mostly unnecessary, but preempt has to
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 3fd5b29dc933..a3acb7a85f6a 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -702,7 +702,7 @@ static bool wm2000_readable_reg(struct device *dev, unsigned int reg)
 }
 
 static const struct regmap_config wm2000_regmap = {
-        .reg_bits = 8,
+        .reg_bits = 16,
         .val_bits = 8,
 
         .max_register = WM2000_REG_IF_CTL,
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index d6e2bb49c59c..060dccb9ec75 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -197,7 +197,13 @@ static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
                 /* no data provider, so send silence */
                 unsigned int offs = 0;
                 for (i = 0; i < ctx->packets; ++i) {
-                        int counts = ctx->packet_size[i];
+                        int counts;
+
+                        if (ctx->packet_size[i])
+                                counts = ctx->packet_size[i];
+                        else
+                                counts = snd_usb_endpoint_next_packet_size(ep);
+
                         urb->iso_frame_desc[i].offset = offs * ep->stride;
                         urb->iso_frame_desc[i].length = counts * ep->stride;
                         offs += counts;