Diffstat (limited to 'drivers/target/target_core_device.c')
 -rw-r--r--  drivers/target/target_core_device.c | 48
 1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c993e6..ca6e4a4df13 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 	struct se_dev_entry *deve;
 	u32 i;
 
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 
 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 		}
 		spin_unlock_irq(&nacl->device_list_lock);
 
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 }
 
 static struct se_port *core_alloc_port(struct se_device *dev)
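The two hunks above convert tpg->acl_node_lock from the *_bh spinlock variants to the *_irq variants. As a general reminder of the distinction (this sketch is illustrative only and not part of the patch): spin_lock_bh() masks only softirqs on the local CPU, while spin_lock_irq() also masks hard interrupts, which is what a lock needs once it can be contended from, or nested under locks held in, hard-IRQ-disabled context.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */
static int example_shared_count;

/* Process-context writer: must block hard IRQs, not just softirqs. */
static void example_update_from_process_context(void)
{
	spin_lock_irq(&example_lock);
	example_shared_count++;
	spin_unlock_irq(&example_lock);
}

/*
 * Hard-IRQ-context user: spin_lock_bh() in the writer above would not stop
 * this handler from interrupting the critical section on the same CPU and
 * deadlocking on example_lock; spin_lock_irq() does.
 */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	spin_lock(&example_lock);
	example_shared_count++;
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}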
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
 	return ret;
 }
 
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 tmp, aligned_max_sectors;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+	aligned_max_sectors = (tmp / block_size);
+	if (max_sectors != aligned_max_sectors) {
+		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+				" to %u\n", max_sectors, aligned_max_sectors);
+		return aligned_max_sectors;
+	}
+
+	return max_sectors;
+}
+
 void se_dev_set_default_attribs(
 	struct se_device *dev,
 	struct se_dev_limits *dev_limits)
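To make the helper's rounding concrete, here is a minimal user-space sketch of the same arithmetic. ROUNDDOWN mirrors the kernel's rounddown() macro, and the PAGE_SIZE, block_size and max_sectors values below are assumptions chosen purely for illustration.

#include <stdio.h>

/* Same arithmetic as the kernel's rounddown() macro. */
#define ROUNDDOWN(x, y)	((x) - ((x) % (y)))
#define PAGE_SIZE	4096u		/* assumed page size for this example */

int main(void)
{
	unsigned int block_size = 512;
	unsigned int max_sectors = 1023;	/* hypothetical backend limit */
	unsigned int tmp, aligned;

	/* 1023 * 512 = 523776 bytes, rounded down to 520192 (127 full pages). */
	tmp = ROUNDDOWN(max_sectors * block_size, PAGE_SIZE);
	aligned = tmp / block_size;		/* 520192 / 512 = 1016 sectors */

	printf("max_sectors %u -> %u\n", max_sectors, aligned);
	return 0;
}

So a limit of 1023 sectors becomes 1016, the largest sector count whose byte length is a whole number of pages; a limit that is already page-aligned is returned unchanged.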
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
 	 * max_sectors is based on subsystem plugin dependent requirements.
 	 */
 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+						limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
 	 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 			return -EINVAL;
 		}
 	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	max_sectors = se_dev_align_max_sectors(max_sectors,
+				dev->se_sub_dev->se_dev_attrib.block_size);
 
 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
 	 */
 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 		struct se_node_acl *acl;
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-			if (acl->dynamic_node_acl) {
-				spin_unlock_bh(&tpg->acl_node_lock);
+			if (acl->dynamic_node_acl &&
+			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+				spin_unlock_irq(&tpg->acl_node_lock);
 				core_tpg_add_node_to_devs(acl, tpg);
-				spin_lock_bh(&tpg->acl_node_lock);
+				spin_lock_irq(&tpg->acl_node_lock);
 			}
 		}
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 	}
 
 	return lun_p;
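The added condition treats tpg_check_demo_mode_login_only() as an optional fabric callback: a newly added LUN is auto-mapped into dynamically generated node ACLs only when the fabric either does not implement the callback or returns zero from it. A minimal sketch of how a fabric module might opt out, assuming the callback signature and the target_core_fabric_ops layout of this kernel series; the fabric name and header choice are hypothetical.

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>	/* assumed header layout for this series */

/*
 * Hypothetical fabric callback: demo mode should only permit logins, so
 * report non-zero and skip auto-mapping new LUNs into dynamic node ACLs.
 */
static int example_tpg_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	return 1;
}

static struct target_core_fabric_ops example_fabric_ops = {
	/* ... mandatory fabric callbacks omitted ... */
	.tpg_check_demo_mode_login_only = example_tpg_check_demo_mode_login_only,
};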