From 38becbeadfc9318867d98c9d792b40997d5cb264 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Mon, 25 Nov 2013 11:56:09 -0800 Subject: target: Remove unused ua_dev_list member in struct se_ua Initialized but not used. Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index b04467e..505519b 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -98,7 +98,6 @@ int core_scsi3_ua_allocate( pr_err("Unable to allocate struct se_ua\n"); return -ENOMEM; } - INIT_LIST_HEAD(&ua->ua_dev_list); INIT_LIST_HEAD(&ua->ua_nacl_list); ua->ua_nacl = nacl; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6..d6f96c7 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -504,7 +504,6 @@ struct se_ua { u8 ua_asc; u8 ua_ascq; struct se_node_acl *ua_nacl; - struct list_head ua_dev_list; struct list_head ua_nacl_list; }; -- cgit v0.10.2 From 3f0ed57b26ddd6358b4ab2b9bde652fd683fe02d Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Wed, 27 Nov 2013 14:57:56 -0800 Subject: target: Allocate more room for port default groups See target_stat_setup_port_default_groups, we need a 4 element array. Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index dae2ad6..fdadc4d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun( lun_cg->default_groups[1] = NULL; port_stat_grp = &lun->port_stat_grps.stat_group; - port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); -- cgit v0.10.2 From ab6dae8236767f9815bb00c29a56d045e33cd470 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Mon, 9 Dec 2013 14:27:36 -0800 Subject: target: Fix sizeof in kmalloc for some default_groups arrays Allocating an array of pointers, not the objects themselves. These two sites now match all the other sites. Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 272755d..a1c23d1 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2937,7 +2937,7 @@ static int __init target_core_init_configfs(void) * and ALUA Logical Unit Group and Target Port Group infrastructure. 
*/ target_cg = &subsys->su_group; - target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, + target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index fdadc4d..7de9f04 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun( lun_cg->default_groups[1] = NULL; port_stat_grp = &lun->port_stat_grps.stat_group; - port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); -- cgit v0.10.2 From 2af7973a37059c92330014ee5f39adc94900e7e1 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 26 Nov 2013 11:55:22 -0800 Subject: target: Refer to u32 luns as unpacked_lun It's clearer to refer to pointers to the struct se_lun as "lun" and the actual number itself as "unpacked_lun". Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 207b340..4b6b787 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1112,23 +1112,23 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, struct se_device *dev, - u32 lun) + u32 unpacked_lun) { - struct se_lun *lun_p; + struct se_lun *lun; int rc; - lun_p = core_tpg_pre_addlun(tpg, lun); - if (IS_ERR(lun_p)) - return lun_p; + lun = core_tpg_pre_addlun(tpg, unpacked_lun); + if (IS_ERR(lun)) + return lun; - rc = core_tpg_post_addlun(tpg, lun_p, + rc = core_tpg_post_addlun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), - tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); /* * Update LUN maps for dynamically added initiators when @@ -1149,7 +1149,7 @@ struct se_lun *core_dev_add_lun( spin_unlock_irq(&tpg->acl_node_lock); } - return lun_p; + return lun; } /* core_dev_del_lun(): -- cgit v0.10.2 From d344f8a15637e8b57a0d05a6d50182c11de08606 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 26 Nov 2013 12:07:54 -0800 Subject: target: Rename core_tpg_{pre,post}_addlun for clarity "pre" is really an allocation function. The only time it isn't called is for virtual_lun0, which is statically allocated. Renaming that to "alloc" lets the other function not need to be "post", and just be called core_tpg_add_lun. 
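For illustration, the caller pattern after the rename (a sketch assembled from the hunks below; error handling abbreviated):

	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);	/* was core_tpg_pre_addlun() */
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,			/* was core_tpg_post_addlun() */
			TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

virtual_lun0, being statically allocated, skips the alloc step and calls core_tpg_add_lun() directly.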
(nab: fix minor applying fuzz in core_tpg_setup_virtual_lun0) Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 4b6b787..dbd78a1 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1117,11 +1117,11 @@ struct se_lun *core_dev_add_lun( struct se_lun *lun; int rc; - lun = core_tpg_pre_addlun(tpg, unpacked_lun); + lun = core_tpg_alloc_lun(tpg, unpacked_lun); if (IS_ERR(lun)) return lun; - rc = core_tpg_post_addlun(tpg, lun, + rc = core_tpg_add_lun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 47b63b0..f67e764 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -77,8 +77,8 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp const char *); void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); -struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); -int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, +struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); +int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, u32, void *); struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f697f8b..a1dbc94 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -662,7 +662,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) if (ret < 0) return ret; - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); if (ret < 0) { percpu_ref_cancel_init(&lun->lun_ref); return ret; @@ -789,7 +789,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) } EXPORT_SYMBOL(core_tpg_deregister); -struct se_lun *core_tpg_pre_addlun( +struct se_lun *core_tpg_alloc_lun( struct se_portal_group *tpg, u32 unpacked_lun) { @@ -819,7 +819,7 @@ struct se_lun *core_tpg_pre_addlun( return lun; } -int core_tpg_post_addlun( +int core_tpg_add_lun( struct se_portal_group *tpg, struct se_lun *lun, u32 lun_access, -- cgit v0.10.2 From 340dbf729c3395cf1317890d033aa9ac7347766c Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Thu, 12 Dec 2013 16:12:33 -0800 Subject: target: Don't use void* when passing dev in core_tpg_add_lun Especially since it's actually a device. 
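A sketch of what the stronger type buys: a 'void *' parameter silently accepts any object pointer, while 'struct se_device *' lets the compiler diagnose a mismatched argument. Hypothetical misuse, for illustration only:

	/* Before: compiled without complaint, since any object pointer
	 * converts implicitly to void *.
	 * After: "incompatible pointer type" diagnostic, because 'lun' is a
	 * struct se_lun *, not a struct se_device *.
	 */
	core_tpg_add_lun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, lun);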
Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index f67e764..1e27614 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -79,7 +79,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, - u32, void *); + u32, struct se_device *); struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index a1dbc94..d1df39a 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -823,7 +823,7 @@ int core_tpg_add_lun( struct se_portal_group *tpg, struct se_lun *lun, u32 lun_access, - void *lun_ptr) + struct se_device *dev) { int ret; @@ -831,7 +831,7 @@ int core_tpg_add_lun( if (ret < 0) return ret; - ret = core_dev_export(lun_ptr, tpg, lun); + ret = core_dev_export(dev, tpg, lun); if (ret < 0) { percpu_ref_cancel_init(&lun->lun_ref); return ret; -- cgit v0.10.2 From bb91c1a087ed29ceb5b25b9c210c6665e13c36eb Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:43 +0100 Subject: target_core_alua: validate ALUA state transition As we now can modify the list of supported states we need to validate the requested ALUA state when doing a state transition. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fdcee32..292ecce 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -41,11 +41,14 @@ #include "target_core_alua.h" #include "target_core_ua.h" -static sense_reason_t core_alua_check_transition(int state, int *primary); +static sense_reason_t core_alua_check_transition(int state, int valid, + int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, int explicit, int offline); +static char *core_alua_dump_state(int state); + static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; @@ -210,7 +213,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) unsigned char *ptr; sense_reason_t rc = TCM_NO_SENSE; u32 len = 4; /* Skip over RESERVED area in header */ - int alua_access_state, primary = 0; + int alua_access_state, primary = 0, valid_states; u16 tg_pt_id, rtpi; if (!l_port) @@ -252,6 +255,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; ptr = &buf[4]; /* Skip over RESERVED area in header */ @@ -263,7 +267,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. */ - rc = core_alua_check_transition(alua_access_state, &primary); + rc = core_alua_check_transition(alua_access_state, + valid_states, &primary); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -618,17 +623,31 @@ out: * Check implicit and explicit ALUA state change request. 
*/ static sense_reason_t -core_alua_check_transition(int state, int *primary) +core_alua_check_transition(int state, int valid, int *primary) { + /* + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are + * defined as primary target port asymmetric access states. + */ switch (state) { case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + if (!(valid & ALUA_AO_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + if (!(valid & ALUA_AN_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_STANDBY: + if (!(valid & ALUA_S_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_UNAVAILABLE: - /* - * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are - * defined as primary target port asymmetric access states. - */ + if (!(valid & ALUA_U_SUP)) + goto not_supported; *primary = 1; break; case ALUA_ACCESS_STATE_OFFLINE: @@ -636,14 +655,27 @@ core_alua_check_transition(int state, int *primary) * OFFLINE state is defined as a secondary target port * asymmetric access state. */ + if (!(valid & ALUA_O_SUP)) + goto not_supported; *primary = 0; break; + case ALUA_ACCESS_STATE_TRANSITION: + /* + * Transitioning is set internally, and + * cannot be selected manually. + */ + goto not_supported; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; } return 0; + +not_supported: + pr_err("ALUA access state %s not supported", + core_alua_dump_state(state)); + return TCM_INVALID_PARAMETER_LIST; } static char *core_alua_dump_state(int state) @@ -659,6 +691,8 @@ static char *core_alua_dump_state(int state) return "Unavailable"; case ALUA_ACCESS_STATE_OFFLINE: return "Offline"; + case ALUA_ACCESS_STATE_TRANSITION: + return "Transitioning"; default: return "Unknown"; } @@ -884,9 +918,10 @@ int core_alua_do_port_transition( struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; unsigned char *md_buf; - int primary; + int primary, valid_states; - if (core_alua_check_transition(new_state, &primary) != 0) + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; + if (core_alua_check_transition(new_state, valid_states, &primary) != 0) return -EINVAL; md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); -- cgit v0.10.2 From 1e0b9403bd2e77006ae8dcdf279c0f30c7efc258 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:44 +0100 Subject: target_core_alua: Allocate ALUA metadata on demand We should only allocate ALUA metadata if we're actually going to write them. 
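The shape of the change, sketched (alua_update_metadata_sketch is a hypothetical stand-in for the two update functions below): each metadata writer now allocates and frees its own buffer, instead of every caller preallocating one that may never be used:

	static int alua_update_metadata_sketch(const char *path, int state)
	{
		unsigned char *md_buf;
		int len, rc;

		md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);	/* only when writing */
		if (!md_buf)
			return -ENOMEM;
		len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			       "alua_access_state=0x%02x\n", state);
		rc = core_alua_write_tpg_metadata(path, md_buf, len);
		kfree(md_buf);					/* freed on the same path */
		return rc;
	}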
Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 292ecce..738244b 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -770,16 +770,22 @@ static int core_alua_write_tpg_metadata( */ static int core_alua_update_tpg_primary_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp, - int primary_state, - unsigned char *md_buf) + int primary_state) { + unsigned char *md_buf; struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; char path[ALUA_METADATA_PATH_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); - len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "tg_pt_gp_id=%hu\n" "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", @@ -790,14 +796,15 @@ static int core_alua_update_tpg_primary_metadata( "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + return rc; } static int core_alua_do_transition_tg_pt( struct t10_alua_tg_pt_gp *tg_pt_gp, struct se_port *l_port, struct se_node_acl *nacl, - unsigned char *md_buf, int new_state, int explicit) { @@ -885,8 +892,7 @@ static int core_alua_do_transition_tg_pt( */ if (tg_pt_gp->tg_pt_gp_write_metadata) { mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp, - new_state, md_buf); + core_alua_update_tpg_primary_metadata(tg_pt_gp, new_state); mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); } /* @@ -917,19 +923,12 @@ int core_alua_do_port_transition( struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; int primary, valid_states; valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; if (core_alua_check_transition(new_state, valid_states, &primary) != 0) return -EINVAL; - md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate buf for ALUA metadata\n"); - return -ENOMEM; - } - local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; @@ -947,10 +946,9 @@ int core_alua_do_port_transition( * success. */ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - md_buf, new_state, explicit); + new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); - kfree(md_buf); return 0; } /* @@ -1000,7 +998,7 @@ int core_alua_do_port_transition( * success. 
*/ core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, md_buf, new_state, explicit); + nacl, new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); @@ -1022,7 +1020,6 @@ int core_alua_do_port_transition( atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); - kfree(md_buf); return 0; } @@ -1031,13 +1028,18 @@ int core_alua_do_port_transition( */ static int core_alua_update_tpg_secondary_metadata( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, - unsigned char *md_buf, - u32 md_buf_len) + struct se_port *port) { + unsigned char *md_buf; struct se_portal_group *se_tpg = port->sep_tpg; char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); @@ -1049,7 +1051,7 @@ static int core_alua_update_tpg_secondary_metadata( snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" "alua_tg_pt_status=0x%02x\n", atomic_read(&port->sep_tg_pt_secondary_offline), port->sep_tg_pt_secondary_stat); @@ -1058,7 +1060,10 @@ static int core_alua_update_tpg_secondary_metadata( se_tpg->se_tpg_tfo->get_fabric_name(), wwn, port->sep_lun->unpacked_lun); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + + return rc; } static int core_alua_set_tg_pt_secondary_state( @@ -1068,8 +1073,6 @@ static int core_alua_set_tg_pt_secondary_state( int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; - u32 md_buf_len; int trans_delay_msecs; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1090,7 +1093,6 @@ static int core_alua_set_tg_pt_secondary_state( else atomic_set(&port->sep_tg_pt_secondary_offline, 0); - md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; port->sep_tg_pt_secondary_stat = (explicit) ? 
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; @@ -1112,18 +1114,9 @@ static int core_alua_set_tg_pt_secondary_state( * secondary state and status */ if (port->sep_tg_pt_secondary_write_md) { - md_buf = kzalloc(md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate md_buf for" - " secondary ALUA access metadata\n"); - return -ENOMEM; - } mutex_lock(&port->sep_tg_pt_md_mutex); - core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, - md_buf, md_buf_len); + core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); mutex_unlock(&port->sep_tg_pt_md_mutex); - - kfree(md_buf); } return 0; @@ -1382,7 +1375,6 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); tg_pt_gp->tg_pt_gp_dev = dev; - tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); /* diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 88e2e83..1a152cd 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -78,6 +78,9 @@ */ #define ALUA_SECONDARY_METADATA_WWN_LEN 256 +/* Used by core_alua_update_tpg_(primary,secondary)_metadata */ +#define ALUA_MD_BUF_LEN 1024 + extern struct kmem_cache *t10_alua_lu_gp_cache; extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index d6f96c7..bf9a4d6 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -291,9 +291,6 @@ struct t10_alua_tg_pt_gp { int tg_pt_gp_implicit_trans_secs; int tg_pt_gp_pref; int tg_pt_gp_write_metadata; - /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ -#define ALUA_MD_BUF_LEN 1024 - u32 tg_pt_gp_md_buf_len; u32 tg_pt_gp_members; atomic_t tg_pt_gp_alua_access_state; atomic_t tg_pt_gp_ref_cnt; -- cgit v0.10.2 From dfbce75ac8b4ed5dc51f8f144ebb274db940388d Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:45 +0100 Subject: target_core_alua: store old and pending ALUA state During state transition we should be storing both the original and the pending state. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 738244b..4805e97 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -812,12 +812,15 @@ static int core_alua_do_transition_tg_pt( struct se_lun_acl *lacl; struct se_port *port; struct t10_alua_tg_pt_gp_member *mem; - int old_state = 0; + /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
@@ -898,13 +901,15 @@ static int core_alua_do_transition_tg_pt( /* * Set the current primary ALUA access state to the requested new state */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + tg_pt_gp->tg_pt_gp_alua_pending_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " from primary access state %s to %s\n", (explicit) ? "explicit" : "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), - core_alua_dump_state(new_state)); + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); return 0; } diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index bf9a4d6..2c723cf 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -284,6 +284,8 @@ struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id; int tg_pt_gp_valid_id; int tg_pt_gp_alua_supported_states; + int tg_pt_gp_alua_pending_state; + int tg_pt_gp_alua_previous_state; int tg_pt_gp_alua_access_status; int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; -- cgit v0.10.2 From 9c6e164c8c28c1f9e24441674f8b8d81551c906b Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:46 +0100 Subject: target_core_alua: Use workqueue for ALUA transitioning Use a workqueue for processing ALUA state transitions; this allows us to process implicit delay properly. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 4805e97..01f0c71 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -769,8 +769,7 @@ static int core_alua_write_tpg_metadata( * Called with tg_pt_gp->tg_pt_gp_md_mutex held */ static int core_alua_update_tpg_primary_metadata( - struct t10_alua_tg_pt_gp *tg_pt_gp, - int primary_state) + struct t10_alua_tg_pt_gp *tg_pt_gp) { unsigned char *md_buf; struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; @@ -789,7 +788,8 @@ static int core_alua_update_tpg_primary_metadata( "tg_pt_gp_id=%hu\n" "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", - tg_pt_gp->tg_pt_gp_id, primary_state, + tg_pt_gp->tg_pt_gp_id, + tg_pt_gp->tg_pt_gp_alua_pending_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, @@ -801,36 +801,17 @@ static int core_alua_update_tpg_primary_metadata( return rc; } -static int core_alua_do_transition_tg_pt( - struct t10_alua_tg_pt_gp *tg_pt_gp, - struct se_port *l_port, - struct se_node_acl *nacl, - int new_state, - int explicit) +static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct se_dev_entry *se_deve; struct se_lun_acl *lacl; struct se_port *port; struct t10_alua_tg_pt_gp_member *mem; - - /* - * Save the old primary ALUA access state, and set the current state - * to ALUA_ACCESS_STATE_TRANSITION. - */ - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); - tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; - /* - * Check for the optional ALUA primary state transition delay - */ - if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) - msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, @@ -865,9 +846,12 @@ static int core_alua_do_transition_tg_pt( if (!lacl) continue; - if (explicit && - (nacl != NULL) && (nacl == lacl->se_lun_nacl) && - (l_port != NULL) && (l_port == port)) + if ((tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && + (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && + (tg_pt_gp->tg_pt_gp_alua_port != NULL) && + (tg_pt_gp->tg_pt_gp_alua_port == port)) continue; core_scsi3_ua_allocate(lacl->se_lun_nacl, @@ -895,7 +879,7 @@ static int core_alua_do_transition_tg_pt( */ if (tg_pt_gp->tg_pt_gp_write_metadata) { mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp, new_state); + core_alua_update_tpg_primary_metadata(tg_pt_gp); mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); } /* @@ -910,6 +894,87 @@ static int core_alua_do_transition_tg_pt( tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_dec(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (tg_pt_gp->tg_pt_gp_transition_complete) + complete(tg_pt_gp->tg_pt_gp_transition_complete); +} + +static int core_alua_do_transition_tg_pt( + struct t10_alua_tg_pt_gp *tg_pt_gp, + int new_state, + int explicit) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + DECLARE_COMPLETION_ONSTACK(wait); + + /* Nothing to be done here */ + if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + return 0; + + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return -EAGAIN; + + /* + * Flush any pending transitions + */ + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == + ALUA_ACCESS_STATE_TRANSITION) { + /* Just in case */ + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + return 0; + } + + /* + * Save the old primary ALUA access state, and set the current state + * to ALUA_ACCESS_STATE_TRANSITION. + */ + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_TRANSITION); + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; + + /* + * Check for the optional ALUA primary state transition delay + */ + if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) + msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + + /* + * Take a reference for workqueue item + */ + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { + unsigned long transition_tmo; + + transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, + transition_tmo); + } else { + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, 0); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + } return 0; } @@ -923,12 +988,10 @@ int core_alua_do_port_transition( int explicit) { struct se_device *dev; - struct se_port *port; - struct se_node_acl *nacl; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; - int primary, valid_states; + int primary, valid_states, rc = 0; valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; if (core_alua_check_transition(new_state, valid_states, &primary) != 0) @@ -950,11 +1013,13 @@ int core_alua_do_port_transition( * core_alua_do_transition_tg_pt() will always return * success. */ - core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - new_state, explicit); + l_tg_pt_gp->tg_pt_gp_alua_port = l_port; + l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; + rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, + new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); - return 0; + return rc; } /* * For all other LU groups aside from 'default_lu_gp', walk all of @@ -989,11 +1054,11 @@ int core_alua_do_port_transition( continue; if (l_tg_pt_gp == tg_pt_gp) { - port = l_port; - nacl = l_nacl; + tg_pt_gp->tg_pt_gp_alua_port = l_port; + tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; } else { - port = NULL; - nacl = NULL; + tg_pt_gp->tg_pt_gp_alua_port = NULL; + tg_pt_gp->tg_pt_gp_alua_nacl = NULL; } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_inc(); @@ -1002,12 +1067,14 @@ int core_alua_do_port_transition( * core_alua_do_transition_tg_pt() will always return * success. */ - core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, new_state, explicit); + rc = core_alua_do_transition_tg_pt(tg_pt_gp, + new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_dec(); + if (rc) + break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -1017,15 +1084,18 @@ int core_alua_do_port_transition( } spin_unlock(&lu_gp->lu_gp_lock); - pr_debug("Successfully processed LU Group: %s all ALUA TG PT" - " Group IDs: %hu %s transition to primary state: %s\n", - config_item_name(&lu_gp->lu_gp_group.cg_item), - l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", - core_alua_dump_state(new_state)); + if (!rc) { + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" + " Group IDs: %hu %s transition to primary state: %s\n", + config_item_name(&lu_gp->lu_gp_group.cg_item), + l_tg_pt_gp->tg_pt_gp_id, + (explicit) ? 
"explicit" : "implicit", + core_alua_dump_state(new_state)); + } atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); - return 0; + return rc; } /* @@ -1379,6 +1449,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); + INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); @@ -1507,6 +1579,8 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 2c723cf..e653110 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -302,6 +302,10 @@ struct t10_alua_tg_pt_gp { struct config_group tg_pt_gp_group; struct list_head tg_pt_gp_list; struct list_head tg_pt_gp_mem_list; + struct se_port *tg_pt_gp_alua_port; + struct se_node_acl *tg_pt_gp_alua_nacl; + struct delayed_work tg_pt_gp_transition_work; + struct completion *tg_pt_gp_transition_complete; }; struct t10_alua_tg_pt_gp_member { -- cgit v0.10.2 From 03ba84ca95a84dba9b42492edd2632dc3c7eb359 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:47 +0100 Subject: target_core: simplify scsi_name_len calculation scsi_name_len in spc_emulate_evpd_83 is calculated twice, with the results of the first calculation discarded. So remove it. And check for the maximum allowed length, too. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 021c3f4..603c411 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -365,16 +365,6 @@ check_lu_gp: * section 7.5.1 Table 362 */ check_scsi_name: - scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); - /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ - scsi_name_len += 10; - /* Check for 4-byte padding */ - padding = ((-scsi_name_len) & 3); - if (padding != 0) - scsi_name_len += padding; - /* Header size + Designation descriptor */ - scsi_name_len += 4; - buf[off] = (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x3; /* CODE SET == UTF-8 */ @@ -402,8 +392,11 @@ check_scsi_name: * shall be no larger than 256 and shall be a multiple * of four. */ + padding = ((-scsi_name_len) & 3); if (padding) scsi_name_len += padding; + if (scsi_name_len > 256) + scsi_name_len = 256; buf[off-1] = scsi_name_len; off += scsi_name_len; -- cgit v0.10.2 From fbfe858fea2a45df6339eb03dd1715b51f1bdc92 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:48 +0100 Subject: target_core_spc: Include target device descriptor in VPD page 83 We should be including a descriptor referring to the target device to allow identification of different TCM instances. 
(nab: Bump SE_INQUIRY_BUF to 1024 bytes to handle 2x 256 byte SCSI names) Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 603c411..39054d9 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -267,7 +267,7 @@ check_t10_vend_desc: port = lun->lun_sep; if (port) { struct t10_alua_lu_gp *lu_gp; - u32 padding, scsi_name_len; + u32 padding, scsi_name_len, scsi_target_len; u16 lu_gp_id = 0; u16 tg_pt_gp_id = 0; u16 tpgt; @@ -402,6 +402,47 @@ check_scsi_name: off += scsi_name_len; /* Header size + Designation descriptor */ len += (scsi_name_len + 4); + + /* + * Target device designator + */ + buf[off] = + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target device: 10b */ + buf[off] |= 0x20; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifer containing, $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means "" in + * UTF-8 encoding. + */ + scsi_target_len = sprintf(&buf[off], "%s", + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + scsi_target_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. + */ + padding = ((-scsi_target_len) & 3); + if (padding) + scsi_target_len += padding; + if (scsi_name_len > 256) + scsi_name_len = 256; + + buf[off-1] = scsi_target_len; + off += scsi_target_len; + + /* Header size + Designation descriptor */ + len += (scsi_target_len + 4); } buf[2] = ((len >> 8) & 0xff); buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index e653110..6c80015 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -112,7 +112,7 @@ /* Queue Algorithm Modifier default for restricted reordering in control mode page */ #define DA_EMULATE_REST_REORD 0 -#define SE_INQUIRY_BUF 512 +#define SE_INQUIRY_BUF 768 #define SE_MODE_PAGE_BUF 512 #define SE_SENSE_BUF 96 -- cgit v0.10.2 From c66094bf325ee406b92298d73089ee25484a0263 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:49 +0100 Subject: target_core_alua: Referrals infrastructure Add infrastructure for referrals. 
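A layout sketch of the parameter data built by target_emulate_report_referrals() in the diff below (offsets inferred from the code; one descriptor per LBA map entry):

	/*
	 * bytes 0-3  : header; RETURN DATA LENGTH stored big-endian at bytes 2-3
	 * per user data segment referral descriptor:
	 *   byte   3     : number of target port group entries (pg_num)
	 *   bytes  4-11  : FIRST LBA, big-endian 64-bit
	 *   bytes 12-19  : LAST LBA, big-endian 64-bit
	 *   then one 4-byte entry per port group:
	 *     byte 0   : asymmetric access state (low nibble)
	 *     bytes 2-3: target port group identifier, big-endian 16-bit
	 */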
v2 changes: - Fix unsigned long long division in core_alua_state_lba_dependent on 32-bit (Fengguang + Chen + Hannes) - Fix compile warning in core_alua_state_lba_dependent (nab) - Convert segment_* + sectors variables in core_alua_state_lba_dependent to u64 (Hannes) Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 01f0c71..0843c8f 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -58,6 +58,75 @@ static LIST_HEAD(lu_gps_list); struct t10_alua_lu_gp *default_lu_gp; /* + * REPORT REFERRALS + * + * See sbc3r35 section 5.23 + */ +sense_reason_t +target_emulate_report_referrals(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *map_mem; + unsigned char *buf; + u32 rd_len = 0, off; + + if (cmd->data_length < 4) { + pr_warn("REPORT REFERRALS allocation length %u too" + " small\n", cmd->data_length); + return TCM_INVALID_CDB_FIELD; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + off = 4; + spin_lock(&dev->t10_alua.lba_map_lock); + if (list_empty(&dev->t10_alua.lba_map_list)) { + spin_unlock(&dev->t10_alua.lba_map_lock); + transport_kunmap_data_sg(cmd); + + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + int desc_num = off + 3; + int pg_num; + + off += 4; + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); + off += 8; + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); + off += 8; + rd_len += 20; + pg_num = 0; + list_for_each_entry(map_mem, &map->lba_map_mem_list, + lba_map_mem_list) { + buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f; + off++; + buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff; + buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff); + rd_len += 4; + pg_num++; + } + buf[desc_num] = pg_num; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + + /* + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload + */ + put_unaligned_be16(rd_len, &buf[2]); + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, GOOD); + return 0; +} + +/* * REPORT_TARGET_PORT_GROUPS * * See spc4r17 section 6.27 @@ -391,6 +460,81 @@ static inline int core_alua_state_nonoptimized( return 0; } +static inline int core_alua_state_lba_dependent( + struct se_cmd *cmd, + struct t10_alua_tg_pt_gp *tg_pt_gp, + u8 *alua_ascq) +{ + struct se_device *dev = cmd->se_dev; + u64 segment_size, segment_mult, sectors, lba; + + /* Only need to check for cdb actually containing LBAs */ + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) + return 0; + + spin_lock(&dev->t10_alua.lba_map_lock); + segment_size = dev->t10_alua.lba_map_segment_size; + segment_mult = dev->t10_alua.lba_map_segment_multiplier; + sectors = cmd->data_length / dev->dev_attrib.block_size; + + lba = cmd->t_task_lba; + while (lba < cmd->t_task_lba + sectors) { + struct t10_alua_lba_map *cur_map = NULL, *map; + struct t10_alua_lba_map_member *map_mem; + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + u64 start_lba, last_lba; + u64 first_lba = map->lba_map_first_lba; + + if (segment_mult) { + u64 tmp = lba; + start_lba = sector_div(tmp, segment_size * segment_mult); + + last_lba = first_lba + segment_size - 1; + if (start_lba >= first_lba && + start_lba <= last_lba) { + lba += segment_size; + cur_map = map; + break; + } + } else { + last_lba = 
map->lba_map_last_lba; + if (lba >= first_lba && lba <= last_lba) { + lba = last_lba + 1; + cur_map = map; + break; + } + } + } + if (!cur_map) { + spin_unlock(&dev->t10_alua.lba_map_lock); + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + return 1; + } + list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, + lba_map_mem_list) { + if (map_mem->lba_map_mem_alua_pg_id != + tg_pt_gp->tg_pt_gp_id) + continue; + switch(map_mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_STANDBY: + spin_unlock(&dev->t10_alua.lba_map_lock); + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + return 1; + case ALUA_ACCESS_STATE_UNAVAILABLE: + spin_unlock(&dev->t10_alua.lba_map_lock); + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + return 1; + default: + break; + } + } + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return 0; +} + static inline int core_alua_state_standby( struct se_cmd *cmd, unsigned char *cdb, @@ -588,6 +732,9 @@ target_alua_state_check(struct se_cmd *cmd) case ALUA_ACCESS_STATE_TRANSITION: ret = core_alua_state_transition(cmd, cdb, &alua_ascq); break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq); + break; /* * OFFLINE is a secondary ALUA target port group access state, that is * handled above with struct se_port->sep_tg_pt_secondary_offline=1 @@ -650,6 +797,11 @@ core_alua_check_transition(int state, int valid, int *primary) goto not_supported; *primary = 1; break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (!(valid & ALUA_LBD_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_OFFLINE: /* * OFFLINE state is defined as a secondary target port @@ -685,6 +837,8 @@ static char *core_alua_dump_state(int state) return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + return "LBA Dependent"; case ALUA_ACCESS_STATE_STANDBY: return "Standby"; case ALUA_ACCESS_STATE_UNAVAILABLE: diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 1a152cd..47950cd 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -13,12 +13,13 @@ /* * ASYMMETRIC ACCESS STATE field * - * from spc4r17 section 6.27 Table 245 + * from spc4r36j section 6.37 Table 307 */ #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 #define ALUA_ACCESS_STATE_STANDBY 0x2 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 +#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4 #define ALUA_ACCESS_STATE_OFFLINE 0xe #define ALUA_ACCESS_STATE_TRANSITION 0xf @@ -88,6 +89,7 @@ extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index a1c23d1..e0a47f5 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2054,6 +2054,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( " transition while TPGS_IMPLICIT_ALUA is disabled\n"); return -EINVAL; } + if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && + new_state == 
ALUA_ACCESS_STATE_LBA_DEPENDENT) { + /* LBA DEPENDENT is only allowed with implicit ALUA */ + pr_err("Unable to process implicit configfs ALUA transition" + " while explicit ALUA management is enabled\n"); + return -EINVAL; + } ret = core_alua_do_port_transition(tg_pt_gp, dev, NULL, NULL, new_state, 0); @@ -2188,7 +2195,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent, tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); -SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO); SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, tg_pt_gp_alua_supported_states, ALUA_U_SUP); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index dbd78a1..88b4fb2 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1439,6 +1439,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) spin_lock_init(&dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); + spin_lock_init(&dev->t10_alua.lba_map_lock); dev->t10_wwn.t10_dev = dev; dev->t10_alua.t10_dev = dev; diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 52ae54e..6863dbe 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -33,7 +33,7 @@ #include "target_core_internal.h" #include "target_core_ua.h" - +#include "target_core_alua.h" static sense_reason_t sbc_emulate_readcapacity(struct se_cmd *cmd) @@ -731,6 +731,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case SAI_READ_CAPACITY_16: cmd->execute_cmd = sbc_emulate_readcapacity_16; break; + case SAI_REPORT_REFERRALS: + cmd->execute_cmd = target_emulate_report_referrals; + break; default: pr_err("Unsupported SA: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 39054d9..f9889fd 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -476,6 +476,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) /* If WriteCache emulation is enabled, set V_SUP */ if (spc_check_dev_wce(dev)) buf[6] = 0x01; + /* If an LBA map is present set R_SUP */ + spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + buf[8] = 0x10; + spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); return 0; } @@ -634,6 +639,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) return 0; } +/* Referrals VPD page */ +static sense_reason_t +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x0c; + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); + + return 0; +} + static sense_reason_t spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); @@ -648,6 +667,7 @@ static struct { { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, + { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, }; /* supported vital product data pages */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 66d42ed..0a4edfe 100644 --- a/include/scsi/scsi.h +++ 
b/include/scsi/scsi.h @@ -155,6 +155,7 @@ enum scsi_timeouts { /* values for service action in */ #define SAI_READ_CAPACITY_16 0x10 #define SAI_GET_LBA_STATUS 0x12 +#define SAI_REPORT_REFERRALS 0x13 /* values for VARIABLE_LENGTH_CMD service action codes * see spc4r17 Section D.3.5, table D.7 and D.8 */ #define VLC_SA_RECEIVE_CREDENTIAL 0x1800 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 6c80015..1ba19a4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -247,10 +247,28 @@ typedef enum { struct se_cmd; +struct t10_alua_lba_map_member { + struct list_head lba_map_mem_list; + int lba_map_mem_alua_state; + int lba_map_mem_alua_pg_id; +}; + +struct t10_alua_lba_map { + u64 lba_map_first_lba; + u64 lba_map_last_lba; + struct list_head lba_map_list; + struct list_head lba_map_mem_list; +}; + struct t10_alua { /* ALUA Target Port Group ID */ u16 alua_tg_pt_gps_counter; u32 alua_tg_pt_gps_count; + /* Referrals support */ + spinlock_t lba_map_lock; + u32 lba_map_segment_size; + u32 lba_map_segment_multiplier; + struct list_head lba_map_list; spinlock_t tg_pt_gps_lock; struct se_device *t10_dev; /* Used for default ALUA Target Port Group */ -- cgit v0.10.2 From 229d4f112fd6d1562b6d5324c4cb8f8d097bac54 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 17 Dec 2013 09:18:50 +0100 Subject: target_core_alua: Referrals configfs integration Referrals need an LBA map, which needs to be kept consistent across all target port groups. So instead of tying the map to the target port groups I've implemented a single attribute containing the entire map. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 0843c8f..e73edca 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1351,6 +1351,107 @@ static int core_alua_set_tg_pt_secondary_state( return 0; } +struct t10_alua_lba_map * +core_alua_allocate_lba_map(struct list_head *list, + u64 first_lba, u64 last_lba) +{ + struct t10_alua_lba_map *lba_map; + + lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); + if (!lba_map) { + pr_err("Unable to allocate struct t10_alua_lba_map\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&lba_map->lba_map_mem_list); + lba_map->lba_map_first_lba = first_lba; + lba_map->lba_map_last_lba = last_lba; + + list_add_tail(&lba_map->lba_map_list, list); + return lba_map; +} + +int +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, + int pg_id, int state) +{ + struct t10_alua_lba_map_member *lba_map_mem; + + list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, + lba_map_mem_list) { + if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { + pr_err("Duplicate pg_id %d in lba_map\n", pg_id); + return -EINVAL; + } + } + + lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); + if (!lba_map_mem) { + pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); + return -ENOMEM; + } + lba_map_mem->lba_map_mem_alua_state = state; + lba_map_mem->lba_map_mem_alua_pg_id = pg_id; + + list_add_tail(&lba_map_mem->lba_map_mem_list, + &lba_map->lba_map_mem_list); + return 0; +} + +void +core_alua_free_lba_map(struct list_head *lba_list) +{ + struct t10_alua_lba_map *lba_map, *lba_map_tmp; + struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; + + list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, + lba_map_list) { + list_for_each_entry_safe(lba_map_mem, 
lba_map_mem_tmp, + &lba_map->lba_map_mem_list, + lba_map_mem_list) { + list_del(&lba_map_mem->lba_map_mem_list); + kmem_cache_free(t10_alua_lba_map_mem_cache, + lba_map_mem); + } + list_del(&lba_map->lba_map_list); + kmem_cache_free(t10_alua_lba_map_cache, lba_map); + } +} + +void +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, + int segment_size, int segment_mult) +{ + struct list_head old_lba_map_list; + struct t10_alua_tg_pt_gp *tg_pt_gp; + int activate = 0, supported; + + INIT_LIST_HEAD(&old_lba_map_list); + spin_lock(&dev->t10_alua.lba_map_lock); + dev->t10_alua.lba_map_segment_size = segment_size; + dev->t10_alua.lba_map_segment_multiplier = segment_mult; + list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); + if (lba_map_list) { + list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); + activate = 1; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + supported = tg_pt_gp->tg_pt_gp_alua_supported_states; + if (activate) + supported |= ALUA_LBD_SUP; + else + supported &= ~ALUA_LBD_SUP; + tg_pt_gp->tg_pt_gp_alua_supported_states = supported; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + core_alua_free_lba_map(&old_lba_map_list); +} + struct t10_alua_lu_gp * core_alua_allocate_lu_gp(const char *name, int def_group) { diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 47950cd..0a7d65e 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -86,6 +86,8 @@ extern struct kmem_cache *t10_alua_lu_gp_cache; extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +extern struct kmem_cache *t10_alua_lba_map_cache; +extern struct kmem_cache *t10_alua_lba_map_mem_cache; extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); @@ -95,6 +97,12 @@ extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, struct se_node_acl *, int, int); extern char *core_alua_dump_status(int); +extern struct t10_alua_lba_map *core_alua_allocate_lba_map( + struct list_head *, u64, u64); +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); +extern void core_alua_free_lba_map(struct list_head *); +extern void core_alua_set_lba_map(struct se_device *, struct list_head *, + int, int); extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index e0a47f5..5cf6135 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1741,6 +1741,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { .store = target_core_store_alua_lu_gp, }; +static ssize_t target_core_show_dev_lba_map(void *p, char *page) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *mem; + char *b = page; + int bl = 0; + char state; + + spin_lock(&dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + bl += sprintf(b + 
bl, "%u %u\n", + dev->t10_alua.lba_map_segment_size, + dev->t10_alua.lba_map_segment_multiplier); + list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { + bl += sprintf(b + bl, "%llu %llu", + map->lba_map_first_lba, map->lba_map_last_lba); + list_for_each_entry(mem, &map->lba_map_mem_list, + lba_map_mem_list) { + switch (mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + state = 'O'; + break; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + state = 'A'; + break; + case ALUA_ACCESS_STATE_STANDBY: + state = 'S'; + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + state = 'U'; + break; + default: + state = '.'; + break; + } + bl += sprintf(b + bl, " %d:%c", + mem->lba_map_mem_alua_pg_id, state); + } + bl += sprintf(b + bl, "\n"); + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return bl; +} + +static ssize_t target_core_store_dev_lba_map( + void *p, + const char *page, + size_t count) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *lba_map = NULL; + struct list_head lba_list; + char *map_entries, *ptr; + char state; + int pg_num = -1, pg; + int ret = 0, num = 0, pg_id, alua_state; + unsigned long start_lba = -1, end_lba = -1; + unsigned long segment_size = -1, segment_mult = -1; + + map_entries = kstrdup(page, GFP_KERNEL); + if (!map_entries) + return -ENOMEM; + + INIT_LIST_HEAD(&lba_list); + while ((ptr = strsep(&map_entries, "\n")) != NULL) { + if (!*ptr) + continue; + + if (num == 0) { + if (sscanf(ptr, "%lu %lu\n", + &segment_size, &segment_mult) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + num++; + continue; + } + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing end lba\n", num); + ret = -EINVAL; + break; + } + ptr++; + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing state definitions\n", + num); + ret = -EINVAL; + break; + } + ptr++; + lba_map = core_alua_allocate_lba_map(&lba_list, + start_lba, end_lba); + if (IS_ERR(lba_map)) { + ret = PTR_ERR(lba_map); + break; + } + pg = 0; + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { + switch (state) { + case 'O': + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + break; + case 'A': + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; + break; + case 'S': + alua_state = ALUA_ACCESS_STATE_STANDBY; + break; + case 'U': + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; + break; + default: + pr_err("Invalid ALUA state '%c'\n", state); + ret = -EINVAL; + goto out; + } + + ret = core_alua_allocate_lba_map_mem(lba_map, + pg_id, alua_state); + if (ret) { + pr_err("Invalid target descriptor %d:%c " + "at line %d\n", + pg_id, state, num); + break; + } + pg++; + ptr = strchr(ptr, ' '); + if (ptr) + ptr++; + else + break; + } + if (pg_num == -1) + pg_num = pg; + else if (pg != pg_num) { + pr_err("Only %d from %d port groups definitions " + "at line %d\n", pg, pg_num, num); + ret = -EINVAL; + break; + } + num++; + } +out: + if (ret) { + core_alua_free_lba_map(&lba_list); + count = ret; + } else + core_alua_set_lba_map(dev, &lba_list, + segment_size, segment_mult); + kfree(map_entries); + return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "lba_map", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_lba_map, + .store = target_core_store_dev_lba_map, +}; + static struct configfs_attribute 
*lio_core_dev_attrs[] = { &target_core_attr_dev_info.attr, &target_core_attr_dev_control.attr, @@ -1748,6 +1918,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_udev_path.attr, &target_core_attr_dev_enable.attr, &target_core_attr_dev_alua_lu_gp.attr, + &target_core_attr_dev_lba_map.attr, NULL, }; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 88b4fb2..3244058 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1585,6 +1585,7 @@ void target_free_device(struct se_device *dev) } core_alua_free_lu_gp_mem(dev); + core_alua_set_lba_map(dev, NULL, 0, 0); core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 91953da..18c828d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, @@ -128,14 +130,36 @@ int init_se_kmem_caches(void) "mem_t failed\n"); goto out_free_tg_pt_gp_cache; } + t10_alua_lba_map_cache = kmem_cache_create( + "t10_alua_lba_map_cache", + sizeof(struct t10_alua_lba_map), + __alignof__(struct t10_alua_lba_map), 0, NULL); + if (!t10_alua_lba_map_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_" + "cache failed\n"); + goto out_free_tg_pt_gp_mem_cache; + } + t10_alua_lba_map_mem_cache = kmem_cache_create( + "t10_alua_lba_map_mem_cache", + sizeof(struct t10_alua_lba_map_member), + __alignof__(struct t10_alua_lba_map_member), 0, NULL); + if (!t10_alua_lba_map_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" + "cache failed\n"); + goto out_free_lba_map_cache; + } target_completion_wq = alloc_workqueue("target_completion", WQ_MEM_RECLAIM, 0); if (!target_completion_wq) - goto out_free_tg_pt_gp_mem_cache; + goto out_free_lba_map_mem_cache; return 0; +out_free_lba_map_mem_cache: + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: + kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_mem_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); out_free_tg_pt_gp_cache: @@ -164,6 +188,8 @@ void release_se_kmem_caches(void) kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kmem_cache_destroy(t10_alua_lba_map_cache); + kmem_cache_destroy(t10_alua_lba_map_mem_cache); } /* This code ensures unique mib indexes are handed out. */ -- cgit v0.10.2 From 8a0bedd36a5fe5378555bc01846d7bd7a0d38b85 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 18 Dec 2013 23:54:32 +0530 Subject: drivers: target: Move prototype declaration of function to header file target_core_pr.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move prototype declaration of function spc_parse_naa_6h_vendor_specific() from target_core_xcopy.c to header file target_core_pr.h because it is used by more than one file. 
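As a minimal illustration of the convention (hypothetical file and function names, not code from this series): a symbol referenced from several translation units gets its prototype in a shared header that the defining file also includes, while a single-file helper is simply marked static; either way the compiler sees a declaration before the definition and -Wmissing-prototypes stays quiet.

/* shared.h (hypothetical) */
void shared_helper(int arg);

/* a.c: the definition is preceded by the header prototype */
#include "shared.h"
void shared_helper(int arg) { /* ... */ }

/* b.c: file-local, so static instead of a header entry */
static int local_helper(void) { return 0; }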
This eliminates the following warning in target_core_spc.c: drivers/target/target_core_spc.c:138:6: warning: no previous prototype for ‘spc_parse_naa_6h_vendor_specific’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index ed75cdd..2ee2936 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -43,6 +43,11 @@ #define PR_APTPL_MAX_IPORT_LEN 256 #define PR_APTPL_MAX_TPORT_LEN 256 +/* + * Function defined in target_core_spc.c + */ +void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); + extern struct kmem_cache *t10_pr_reg_cache; extern void core_pr_dump_initiator_port(struct t10_pr_registration *, diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 6b88a99..669c536 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -40,10 +40,6 @@ static struct workqueue_struct *xcopy_wq = NULL; /* - * From target_core_spc.c - */ -extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); -/* * From target_core_device.c */ extern struct mutex g_device_mutex; -- cgit v0.10.2 From 452e20106c422dbbb439bbae69166532dc0eb816 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 18 Dec 2013 23:56:44 +0530 Subject: drivers: target: Mark function as static in target_core_iblock.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark function iblock_get_write_cache() as static in target_core_iblock.c because it is not used outside this file. This eliminates the following warning in target_core_iblock.c: drivers/target/target_core_iblock.c:766:6: warning: no previous prototype for ‘iblock_get_write_cache’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c87959f..15d9121 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -763,7 +763,7 @@ iblock_parse_cdb(struct se_cmd *cmd) return sbc_parse_cdb(cmd, &iblock_sbc_ops); } -bool iblock_get_write_cache(struct se_device *dev) +static bool iblock_get_write_cache(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct block_device *bd = ib_dev->ibd_bd; -- cgit v0.10.2 From f0a6c693695ab305b16c89f6313061f20ca5e240 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Thu, 19 Dec 2013 00:02:54 +0530 Subject: drivers: target: Mark functions as static in tcm_loop.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions tcm_loop_make_naa_tpg(), tcm_loop_drop_naa_tpg(), tcm_loop_make_scsi_hba() and tcm_loop_drop_scsi_hba() as static in loopback/tcm_loop.c because they are not used outside this file. 
This eliminates the following warning in loopback/tcm_loop.c: drivers/target/loopback/tcm_loop.c:1231:25: warning: no previous prototype for ‘tcm_loop_make_naa_tpg’ [-Wmissing-prototypes] drivers/target/loopback/tcm_loop.c:1276:6: warning: no previous prototype for ‘tcm_loop_drop_naa_tpg’ [-Wmissing-prototypes] drivers/target/loopback/tcm_loop.c:1308:16: warning: no previous prototype for ‘tcm_loop_make_scsi_hba’ [-Wmissing-prototypes] drivers/target/loopback/tcm_loop.c:1378:6: warning: no previous prototype for ‘tcm_loop_drop_scsi_hba’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 1b41e67..763ee45 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -1228,7 +1228,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = { /* Start items for tcm_loop_naa_cit */ -struct se_portal_group *tcm_loop_make_naa_tpg( +static struct se_portal_group *tcm_loop_make_naa_tpg( struct se_wwn *wwn, struct config_group *group, const char *name) @@ -1273,7 +1273,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( return &tl_tpg->tl_se_tpg; } -void tcm_loop_drop_naa_tpg( +static void tcm_loop_drop_naa_tpg( struct se_portal_group *se_tpg) { struct se_wwn *wwn = se_tpg->se_tpg_wwn; @@ -1305,7 +1305,7 @@ void tcm_loop_drop_naa_tpg( /* Start items for tcm_loop_cit */ -struct se_wwn *tcm_loop_make_scsi_hba( +static struct se_wwn *tcm_loop_make_scsi_hba( struct target_fabric_configfs *tf, struct config_group *group, const char *name) @@ -1375,7 +1375,7 @@ out: return ERR_PTR(ret); } -void tcm_loop_drop_scsi_hba( +static void tcm_loop_drop_scsi_hba( struct se_wwn *wwn) { struct tcm_loop_hba *tl_hba = container_of(wwn, -- cgit v0.10.2 From 594c42e9bd9f70a41bf79236db985ba940d28eb8 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Thu, 19 Dec 2013 00:05:59 +0530 Subject: drivers: target: Mark functions and structures as static in tfc_conf.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions ft_tpg_alloc_fabric_acl(), ft_register_configfs() and ft_deregister_configfs() as static in tcm_fc/tfc_conf.c because they are not used outside this file. 
This eliminates the following warnings in tcm_fc/tfc_conf.c: drivers/target/tcm_fc/tfc_conf.c:270:21: warning: no previous prototype for ‘ft_tpg_alloc_fabric_acl’ [-Wmissing-prototypes] drivers/target/tcm_fc/tfc_conf.c:555:5: warning: no previous prototype for ‘ft_register_configfs’ [-Wmissing-prototypes] drivers/target/tcm_fc/tfc_conf.c:602:6: warning: no previous prototype for ‘ft_deregister_configfs’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index c6932fb..e879da8 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) return found; } -struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) +static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) { struct ft_node_acl *acl; @@ -552,7 +552,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { .fabric_drop_nodeacl = &ft_del_acl, }; -int ft_register_configfs(void) +static int ft_register_configfs(void) { struct target_fabric_configfs *fabric; int ret; @@ -599,7 +599,7 @@ int ft_register_configfs(void) return 0; } -void ft_deregister_configfs(void) +static void ft_deregister_configfs(void) { if (!ft_configfs) return; -- cgit v0.10.2 From 38edd724577123c972f2264382005ac910ce747f Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Thu, 19 Dec 2013 14:36:11 +0100 Subject: target_core_alua: check for buffer overflow When writing to a command-provided buffer we need to ensure that we're not writing past the end of it. At the same time we need to continue processing, as the final data length (i.e., the required size of the buffer) typically needs to be returned.
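The shape of the fix can be sketched independently of the ALUA code (a minimal sketch with hypothetical names, not the actual patch below): each write into the command-provided buffer is gated on the current offset still lying inside it, while the offset and the reported length advance unconditionally, so a too-small buffer yields truncated data together with the full required length.

/* minimal sketch of the bounded-write pattern (hypothetical helper) */
static unsigned int fill_descriptors(unsigned char *buf, unsigned int buf_len)
{
	unsigned int off = 0, rd_len = 0;

	while (rd_len < 16) {
		if (buf_len > off)	/* write only while inside the buffer */
			buf[off] = 0xff;
		off++;			/* ...but always advance the offset */
		rd_len++;		/* ...and the length being accounted */
	}
	return rd_len;	/* required size, even when buf was too small */
}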
Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e73edca..12da9b38 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -96,22 +96,33 @@ target_emulate_report_referrals(struct se_cmd *cmd) int pg_num; off += 4; - put_unaligned_be64(map->lba_map_first_lba, &buf[off]); + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); off += 8; - put_unaligned_be64(map->lba_map_last_lba, &buf[off]); + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); off += 8; rd_len += 20; pg_num = 0; list_for_each_entry(map_mem, &map->lba_map_mem_list, lba_map_mem_list) { - buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f; + int alua_state = map_mem->lba_map_mem_alua_state; + int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; + + if (cmd->data_length > off) + buf[off] = alua_state & 0x0f; + off += 2; + if (cmd->data_length > off) + buf[off] = (alua_pg_id >> 8) & 0xff; + off++; + if (cmd->data_length > off) + buf[off] = (alua_pg_id & 0xff); off++; - buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff; - buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff); rd_len += 4; pg_num++; } - buf[desc_num] = pg_num; + if (cmd->data_length > desc_num) + buf[desc_num] = pg_num; } spin_unlock(&dev->t10_alua.lba_map_lock); -- cgit v0.10.2 From f82f320edc1e26320bd7e58b347d5616e6a23ff2 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 19 Dec 2013 14:13:28 -0800 Subject: target: Convert inquiry temporary buffer to heap memory This patch converts the temporary buffer in spc_emulate_inquiry() to use dynamically allocated memory, instead of local stack memory. Also bump SE_INQUIRY_BUF up to 1024 bytes to be safe when handling multiple large SCSI name descriptors for EVPD=0x83. 
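One detail worth noting in the diff that follows: once buf becomes a pointer, sizeof(buf) would evaluate to the size of the pointer rather than 1024, which is why the bounded copy now names the SE_INQUIRY_BUF constant explicitly. A minimal sketch of the resulting allocate/fill/copy/free shape (simplified; the real function returns a sense_reason_t and kunmaps the data SGL as shown below):

	unsigned char *buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;	/* the driver returns a TCM sense code here */
	/* ... build the standard or EVPD INQUIRY payload in buf ... */
	if (rbuf)
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
	kfree(buf);	/* freed on every exit path, unlike a stack array */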
Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index f9889fd..279d260 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -697,11 +697,15 @@ spc_emulate_inquiry(struct se_cmd *cmd) struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; unsigned char *rbuf; unsigned char *cdb = cmd->t_task_cdb; - unsigned char buf[SE_INQUIRY_BUF]; + unsigned char *buf; sense_reason_t ret; int p; - memset(buf, 0, SE_INQUIRY_BUF); + buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate response buffer for INQUIRY\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (dev == tpg->tpg_virt_lun0.lun_se_dev) buf[0] = 0x3f; /* Not connected */ @@ -734,9 +738,10 @@ spc_emulate_inquiry(struct se_cmd *cmd) out: rbuf = transport_kmap_data_sg(cmd); if (rbuf) { - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); transport_kunmap_data_sg(cmd); } + kfree(buf); if (!ret) target_complete_cmd(cmd, GOOD); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 1ba19a4..dd87ab4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -112,7 +112,7 @@ /* Queue Algorithm Modifier default for restricted reordering in control mode page */ #define DA_EMULATE_REST_REORD 0 -#define SE_INQUIRY_BUF 768 +#define SE_INQUIRY_BUF 1024 #define SE_MODE_PAGE_BUF 512 #define SE_SENSE_BUF 96 -- cgit v0.10.2 From ce65e5b97b19446ff91e9ee0bbcd9f8b8e87ae5a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:28:13 +0000 Subject: target: Add DIF related base definitions This patch adds DIF related definitions to target_core_base.h that includes enums for target_prot_op + target_prot_type + target_prot_version + target_guard_type + target_pi_error. Also included is struct se_dif_v1_tuple, along with changes to struct se_cmd, struct se_dev_attrib, and struct se_device. Also, add new se_subsystem_api->[init,format,free]_prot() callers used by target core code to setup backend specific protection information after the device has been configured. Enums taken from Sagi Grimberg's original patch. v2 changes: - Drop guard_type related definitions - Update target_prot_op + target_prot_ho definitions (Sagi) - Drop SCF_PROT + pi_prot_version flag - Add se_subsystem_api->format_prot() (Sagi) - Add hw_pi_prot_type device attribute Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 39e0114..0dc2745 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -41,6 +41,9 @@ struct se_subsystem_api { unsigned int (*get_io_opt)(struct se_device *); unsigned char *(*get_sense_buffer)(struct se_cmd *); bool (*get_write_cache)(struct se_device *); + int (*init_prot)(struct se_device *); + int (*format_prot)(struct se_device *); + void (*free_prot)(struct se_device *); }; struct sbc_ops { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index dd87ab4..d98048b 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -435,6 +435,34 @@ struct se_tmr_req { struct list_head tmr_list; }; +enum target_prot_op { + TARGET_PROT_NORMAL = 0, + TARGET_PROT_DIN_INSERT, + TARGET_PROT_DOUT_INSERT, + TARGET_PROT_DIN_STRIP, + TARGET_PROT_DOUT_STRIP, + TARGET_PROT_DIN_PASS, + TARGET_PROT_DOUT_PASS, +}; + +enum target_prot_ho { + PROT_SEPERATED, + PROT_INTERLEAVED, +}; + +enum target_prot_type { + TARGET_DIF_TYPE0_PROT, + TARGET_DIF_TYPE1_PROT, + TARGET_DIF_TYPE2_PROT, + TARGET_DIF_TYPE3_PROT, +}; + +struct se_dif_v1_tuple { + __be16 guard_tag; + __be16 app_tag; + __be32 ref_tag; +}; + struct se_cmd { /* SAM response code being sent to initiator */ u8 scsi_status; @@ -519,6 +547,17 @@ struct se_cmd { /* Used for lun->lun_ref counting */ bool lun_ref_active; + + /* DIF related members */ + enum target_prot_op prot_op; + enum target_prot_type prot_type; + u32 prot_length; + u32 reftag_seed; + struct scatterlist *t_prot_sg; + unsigned int t_prot_nents; + enum target_prot_ho prot_handover; + sense_reason_t pi_err; + u32 block_num; }; struct se_ua { @@ -629,6 +668,9 @@ struct se_dev_attrib { int emulate_tpws; int emulate_caw; int emulate_3pc; + int pi_prot_format; + enum target_prot_type pi_prot_type; + enum target_prot_type hw_pi_prot_type; int enforce_pr_isids; int is_nonrot; int emulate_rest_reord; @@ -759,6 +801,8 @@ struct se_device { /* Linked list for struct se_hba struct se_device list */ struct list_head dev_list; struct se_lun xcopy_lun; + /* Protection Information */ + int prot_length; }; struct se_hba { -- cgit v0.10.2 From fcc4f17b9ce931c93ce08f8cf27d6bd010f0b1ef Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 7 Jan 2014 23:39:33 +0000 Subject: target: Add DIF CHECK_CONDITION ASC/ASCQ exception cases This patch adds support for DIF related CHECK_CONDITION ASC/ASCQ exception cases into transport_send_check_condition_and_sense(). This includes: LOGICAL BLOCK GUARD CHECK FAILED LOGICAL BLOCK APPLICATION TAG CHECK FAILED LOGICAL BLOCK REFERENCE TAG CHECK FAILED that used by DIF TYPE1 and TYPE3 failure cases. Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 18c828d..fa4fc04 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2674,6 +2674,36 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, buffer[SPC_ASC_KEY_OFFSET] = 0x1d; buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; break; + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK GUARD CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; + break; + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; + break; + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; + break; case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: default: /* CURRENT ERROR */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index d98048b..0336d70 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -205,6 +205,9 @@ enum tcm_sense_reason_table { TCM_OUT_OF_RESOURCES = R(0x12), TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), TCM_MISCOMPARE_VERIFY = R(0x14), + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), #undef R }; -- cgit v0.10.2 From 499bf77b0169605a23c38351e3849066fe696877 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:30:03 +0000 Subject: target/sbc: Add DIF setup in sbc_check_prot + sbc_parse_cdb This patch adds sbc_check_prot() for performing various DIF related CDB sanity checks, along with setting cmd->prot_type once sanity checks have passed. Also, add calls in sbc_parse_cdb() for READ_[10,12,16] + WRITE_[10,12,16] to perform DIF sanity checking. v2 changes: - Make sbc_check_prot defined as static (Fengguang + Wei) - Remove unprotected READ/WRITE warning (mkp) - Populate cmd->prot_type + friends (Sagi) - Drop SCF_PROT usage Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 6863dbe..91a92f3 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -563,6 +563,44 @@ sbc_compare_and_write(struct se_cmd *cmd) return TCM_NO_SENSE; } +static bool +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, + u32 sectors) +{ + if (!cmd->t_prot_sg || !cmd->t_prot_nents) + return true; + + switch (dev->dev_attrib.pi_prot_type) { + case TARGET_DIF_TYPE3_PROT: + if (!(cdb[1] & 0xe0)) + return true; + + cmd->reftag_seed = 0xffffffff; + break; + case TARGET_DIF_TYPE2_PROT: + if (cdb[1] & 0xe0) + return false; + + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE1_PROT: + if (!(cdb[1] & 0xe0)) + return true; + + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE0_PROT: + default: + return true; + } + + cmd->prot_type = dev->dev_attrib.pi_prot_type; + cmd->prot_length = dev->prot_length * sectors; + cmd->prot_handover = PROT_SEPERATED; + + return true; +} + sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) { @@ -583,6 +621,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_10: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -590,6 +632,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -597,6 +643,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -612,6 +662,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -621,6 +675,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -630,6 +688,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; -- cgit v0.10.2 From 41861fa831afd4b5006f0042e1f701360330351e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 7 Jan 2014 
22:44:57 +0000 Subject: target/sbc: Add DIF TYPE1+TYPE3 read/write verify emulation This patch adds support for DIF read/write verify emulation for TARGET_DIF_TYPE1_PROT + TARGET_DIF_TYPE3_PROT operation. This includes sbc_dif_verify_write() + sbc_dif_verify_read() calls accessable by backend drivers to perform DIF verify for SGL based data and protection information. Also included is sbc_dif_copy_prot() logic to copy protection information to/from backend provided protection SGLs. Based on scsi_debug.c DIF TYPE1+TYPE3 emulation. v2 changes: - Select CRC_T10DIF for TARGET_CORE in Kconfig (Fengguang) - Drop IP checksum logic from sbc_dif_v1_verify (MKP) - Fix offset on app_tag = 0xffff in sbc_dif_verify_read() Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 1830368..50aad2e 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -3,6 +3,7 @@ menuconfig TARGET_CORE tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" depends on SCSI && BLOCK select CONFIGFS_FS + select CRC_T10DIF default n help Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 91a92f3..26e8bfb 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -1024,3 +1025,180 @@ err: return ret; } EXPORT_SYMBOL(sbc_execute_unmap); + +static sense_reason_t +sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, + const void *p, sector_t sector, unsigned int ei_lba) +{ + int block_size = dev->dev_attrib.block_size; + __be16 csum; + + csum = cpu_to_be16(crc_t10dif(p, block_size)); + + if (sdt->guard_tag != csum) { + pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" + " csum 0x%04x\n", (unsigned long long)sector, + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); + return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" + " sector MSB: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" + " ei_lba: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), ei_lba); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + return 0; +} + +static void +sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, + struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct scatterlist *psg; + void *paddr, *addr; + unsigned int i, len, left; + + left = sectors * dev->prot_length; + + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { + + len = min(psg->length, left); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + addr = kmap_atomic(sg_page(sg)) + sg_off; + + if (read) + memcpy(paddr, addr, len); + else + memcpy(addr, paddr, len); + + left -= len; + kunmap_atomic(paddr); + kunmap_atomic(addr); + } +} + +sense_reason_t +sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, 
struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = cmd->t_prot_sg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = 0; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); + + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_write); + +sense_reason_t +sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = sg_off; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(sg)) + sg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= sg->length) { + kunmap_atomic(paddr); + sg = sg_next(sg); + paddr = kmap_atomic(sg_page(sg)) + sg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + if (sdt->app_tag == cpu_to_be16(0xffff)) { + sector++; + offset += sizeof(struct se_dif_v1_tuple); + continue; + } + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); + + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_read); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 0dc2745..7020e33 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -73,6 +73,10 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd, sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb), void *priv); +sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int, + unsigned int, struct scatterlist *, int); +sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int, + unsigned int, struct scatterlist *, int); void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); -- cgit v0.10.2 From bdbad2bdcbda68746cdff36459cfb1bf4b1d5e59 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 
20:32:46 +0000 Subject: target/spc: Add protection bit to standard INQUIRY output This patch updates spc_emulate_inquiry_std() to set the PROTECT bit when DIF emulation is enabled by the backend device. Reviewed-by: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 279d260..4178c2a 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -100,6 +100,11 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) */ if (dev->dev_attrib.emulate_3pc) buf[5] |= 0x8; + /* + * Set Protection (PROTECT) bit when DIF has been enabled. + */ + if (dev->dev_attrib.pi_prot_type) + buf[5] |= 0x1; buf[7] = 0x2; /* CmdQue=1 */ -- cgit v0.10.2 From 43bb95c7c06de358de7ab525d28bb89b470c1892 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:33:21 +0000 Subject: target/spc: Add protection related bits to INQUIRY EVPD=0x86 This patch updates spc_emulate_evpd_86() (extended INQUIRY) to report GRD_CHK (Guard Check) and REF_CHK (Reference Check) bits when DIF emulation is enabled by the backend device. Reviewed-by: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 4178c2a..73fdff5 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -475,6 +475,15 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) struct se_device *dev = cmd->se_dev; buf[3] = 0x3c; + /* + * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK + * only for TYPE3 protection. + */ + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) + buf[4] = 0x5; + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) + buf[4] = 0x4; + /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; -- cgit v0.10.2 From 56dac14cd25259fc567c5c4726a8f30f0ea407ce Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:36:09 +0000 Subject: target/sbc: Add P_TYPE + PROT_EN bits to READ_CAPACITY_16 This patch updates sbc_emulate_readcapacity_16() to set P_TYPE and PROT_EN bits when DIF emulation is enabled by the backend device. Reviewed-by: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 26e8bfb..75364c7 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -106,6 +106,11 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; buf[11] = dev->dev_attrib.block_size & 0xff; + /* + * Set P_TYPE and PROT_EN bits for DIF support + */ + if (dev->dev_attrib.pi_prot_type) + buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; if (dev->transport->get_lbppbe) buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; -- cgit v0.10.2 From 0c30f421a7dba0a7ca9a3f67dad15d84d4d61e01 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 21:23:34 +0000 Subject: target/spc: Expose ATO bit in control mode page This patch updates spc_modesense_control() to set the Application Tag Owner (ATO) bit when when DIF emulation is enabled by the backend device. Reviewed-by: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 73fdff5..43c5ca98 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -858,6 +858,19 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) * status (see SAM-4). */ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; + /* + * From spc4r30, section 7.5.7 Control mode page + * + * Application Tag Owner (ATO) bit set to one. + * + * If the ATO bit is set to one the device server shall not modify the + * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection + * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE + * TAG field. + */ + if (dev->dev_attrib.pi_prot_type) + p[5] |= 0x80; + p[8] = 0xff; p[9] = 0xff; p[11] = 30; -- cgit v0.10.2 From 2ed22c9cbca61f0f696a0a1cbbb777da281bd79d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 8 Jan 2014 18:19:31 +0000 Subject: target/configfs: Expose protection device attributes This patch adds support for exposing DIF protection device attributes via configfs. This includes: pi_prot_type: Protection Type (0, 1, 3 currently support) pi_prot_format: Protection Format Operation (FILEIO only) Within se_dev_set_pi_prot_type() it also adds the se_subsystem_api device callbacks to setup per device protection information. v2 changes: - Drop pi_guard_type + pi_prot_version related code (MKP) - Add pi_prot_format logic (Sagi) - Add ->free_prot callback in target_free_device - Add hw_pi_prot_type read-only attribute Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 5cf6135..f0e85b1 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -643,6 +643,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_3pc); SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(pi_prot_type); +SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_pi_prot_type); +SE_DEV_ATTR_RO(hw_pi_prot_type); + +DEF_DEV_ATTRIB(pi_prot_format); +SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); @@ -702,6 +711,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_emulate_caw.attr, &target_core_dev_attrib_emulate_3pc.attr, + &target_core_dev_attrib_pi_prot_type.attr, + &target_core_dev_attrib_hw_pi_prot_type.attr, + &target_core_dev_attrib_pi_prot_format.attr, &target_core_dev_attrib_enforce_pr_isids.attr, &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_emulate_rest_reord.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 3244058..883099e 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -918,6 +918,90 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag) return 0; } +int se_dev_set_pi_prot_type(struct se_device *dev, int flag) +{ + int rc, old_prot = dev->dev_attrib.pi_prot_type; + + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { + pr_err("Illegal value %d for pi_prot_type\n", flag); + return -EINVAL; + } + if (flag == 2) { + pr_err("DIF TYPE2 protection 
currently not supported\n"); + return -ENOSYS; + } + if (dev->dev_attrib.hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return 0; + } + if (!dev->transport->init_prot || !dev->transport->free_prot) { + pr_err("DIF protection not supported by backend: %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + dev->dev_attrib.pi_prot_type = flag; + + if (flag && !old_prot) { + rc = dev->transport->init_prot(dev); + if (rc) { + dev->dev_attrib.pi_prot_type = old_prot; + return rc; + } + + } else if (!flag && old_prot) { + dev->transport->free_prot(dev); + } + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + + return 0; +} + +int se_dev_set_pi_prot_format(struct se_device *dev, int flag) +{ + int rc; + + if (!flag) + return 0; + + if (flag != 1) { + pr_err("Illegal value %d for pi_prot_format\n", flag); + return -EINVAL; + } + if (!dev->transport->format_prot) { + pr_err("DIF protection format not supported by backend %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection format requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to format SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + rc = dev->transport->format_prot(dev); + if (rc) + return rc; + + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + + return 0; +} + int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { @@ -1415,6 +1499,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_link_magic = SE_DEV_LINK_MAGIC; dev->se_hba = hba; dev->transport = hba->transport; + dev->prot_length = sizeof(struct se_dif_v1_tuple); INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); @@ -1457,6 +1542,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; dev->dev_attrib.is_nonrot = DA_IS_NONROT; dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; @@ -1589,6 +1675,9 @@ void target_free_device(struct se_device *dev) core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); + if (dev->transport->free_prot) + dev->transport->free_prot(dev); + dev->transport->free_device(dev); } diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 1e27614..de9cab7 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int); int se_dev_set_emulate_tpws(struct se_device *, int); int se_dev_set_emulate_caw(struct se_device *, int); int se_dev_set_emulate_3pc(struct se_device *, int); +int se_dev_set_pi_prot_type(struct se_device *, int); +int se_dev_set_pi_prot_format(struct se_device *, int); int se_dev_set_enforce_pr_isids(struct se_device *, int); int se_dev_set_is_nonrot(struct 
se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); -- cgit v0.10.2 From def2b339b422070ecb99298a80e4b15033adc0ce Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:38:30 +0000 Subject: target: Add protection SGLs to target_submit_cmd_map_sgls This patch adds support to target_submit_cmd_map_sgls() for accepting 'sgl_prot' + 'sgl_prot_count' parameters for DIF protection information. Note the passed parameters are stored at se_cmd->t_prot_sg and se_cmd->t_prot_nents respectively. Also, update tcm_loop and vhost-scsi fabrics usage of target_submit_cmd_map_sgls() to take into account the new parameters. Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 763ee45..112b795 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -217,7 +217,7 @@ static void tcm_loop_submission_work(struct work_struct *work) scsi_bufflen(sc), tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), - sgl_bidi, sgl_bidi_count); + sgl_bidi, sgl_bidi_count, NULL, 0); if (rc < 0) { set_host_byte(sc, DID_NO_CONNECT); goto out_done; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index fa4fc04..aebe0bb 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1310,6 +1310,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, * @sgl_count: scatterlist count for unidirectional mapping * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory protection information + * @sgl_prot_count: scatterlist count for protection information * * Returns non zero to signal active I/O shutdown failure. All other * setup exceptions will be returned as a SCSI CHECK_CONDITION response, @@ -1322,7 +1324,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags, struct scatterlist *sgl, u32 sgl_count, - struct scatterlist *sgl_bidi, u32 sgl_bidi_count) + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count) { struct se_portal_group *se_tpg; sense_reason_t rc; @@ -1364,6 +1367,14 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess target_put_sess_cmd(se_sess, se_cmd); return 0; } + /* + * Save pointers for SGLs containing protection information, + * if present. + */ + if (sgl_prot_count) { + se_cmd->t_prot_sg = sgl_prot; + se_cmd->t_prot_nents = sgl_prot_count; + } rc = target_setup_cmd_from_cdb(se_cmd, cdb); if (rc != 0) { @@ -1406,6 +1417,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess return 0; } } + /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. 
@@ -1445,7 +1457,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, { return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, unpacked_lun, data_length, task_attr, data_dir, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, NULL, 0); } EXPORT_SYMBOL(target_submit_cmd); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index f175629..84488a8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -889,7 +889,7 @@ static void tcm_vhost_submission_work(struct work_struct *work) cmd->tvc_lun, cmd->tvc_exp_data_len, cmd->tvc_task_attr, cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, - sg_bidi_ptr, sg_no_bidi); + sg_bidi_ptr, sg_no_bidi, NULL, 0); if (rc < 0) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 4cf4fda..0218d68 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -105,7 +105,8 @@ sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u32, u32, int, int, int, - struct scatterlist *, u32, struct scatterlist *, u32); + struct scatterlist *, u32, struct scatterlist *, u32, + struct scatterlist *, u32); int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u32, u32, int, int, int); int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, -- cgit v0.10.2 From ecebbf6ccbca58b4470f092cfb0644df59ea05dd Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:31:24 +0000 Subject: target/iblock: Add blk_integrity + BIP passthrough support This patch adds blk_integrity passthrough support for block_device backends using IBLOCK. This includes iblock_alloc_bip() + setup of bio_integrity_payload information that attaches to the leading struct bio once bio_list is populated during fast-path iblock_execute_rw() I/O dispatch. It also updates setup in iblock_configure_device() to detect modes of protection + se dev->dev_attrib.pi_prot_type accordingly, along with creating required bio_set integrity mempools. Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 50aad2e..dc2d84a 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -14,6 +14,7 @@ if TARGET_CORE config TCM_IBLOCK tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" + select BLK_DEV_INTEGRITY help Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered access to Linux/Block devices using BIO diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 15d9121..293d9b0 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev) struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct request_queue *q; struct block_device *bd = NULL; + struct blk_integrity *bi; fmode_t mode; int ret = -ENOMEM; @@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev) if (blk_queue_nonrot(q)) dev->dev_attrib.is_nonrot = 1; + bi = bdev_get_integrity(bd); + if (bi) { + struct bio_set *bs = ib_dev->ibd_bio_set; + + if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || + !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { + pr_err("IBLOCK export of blk_integrity: %s not" + " supported\n", bi->name); + ret = -ENOSYS; + goto out_blkdev_put; + } + + if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; + } + + if (dev->dev_attrib.pi_prot_type) { + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; + } + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", + bs->bio_integrity_pool); + } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + } + return 0; +out_blkdev_put: + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); out_free_bioset: bioset_free(ib_dev->ibd_bio_set); ib_dev->ibd_bio_set = NULL; @@ -170,8 +203,10 @@ static void iblock_free_device(struct se_device *dev) if (ib_dev->ibd_bd != NULL) blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); - if (ib_dev->ibd_bio_set != NULL) + if (ib_dev->ibd_bio_set != NULL) { + bioset_integrity_free(ib_dev->ibd_bio_set); bioset_free(ib_dev->ibd_bio_set); + } kfree(ib_dev); } @@ -586,13 +621,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) return bl; } +static int +iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) +{ + struct se_device *dev = cmd->se_dev; + struct blk_integrity *bi; + struct bio_integrity_payload *bip; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct scatterlist *sg; + int i, rc; + + bi = bdev_get_integrity(ib_dev->ibd_bd); + if (!bi) { + pr_err("Unable to locate bio_integrity\n"); + return -ENODEV; + } + + bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); + if (!bip) { + pr_err("Unable to allocate bio_integrity_payload\n"); + return -ENOMEM; + } + + bip->bip_size = (cmd->data_length / dev->dev_attrib.block_size) * + dev->prot_length; + bip->bip_sector = bio->bi_sector; + + pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_size, + (unsigned long long)bip->bip_sector); + + for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { + + rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, + sg->offset); + if (rc != sg->length) { + 
pr_err("bio_integrity_add_page() failed; %d\n", rc); + return -ENOMEM; + } + + pr_debug("Added bio integrity page: %p length: %d offset; %d\n", + sg_page(sg), sg->length, sg->offset); + } + + return 0; +} + static sense_reason_t iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { struct se_device *dev = cmd->se_dev; struct iblock_req *ibr; - struct bio *bio; + struct bio *bio, *bio_start; struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; @@ -655,6 +735,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (!bio) goto fail_free_ibr; + bio_start = bio; bio_list_init(&list); bio_list_add(&list, bio); @@ -688,6 +769,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, sg_num--; } + if (cmd->prot_type) { + int rc = iblock_alloc_bip(cmd, bio_start); + if (rc) + goto fail_put_bios; + } + iblock_submit_bios(&list, rw); iblock_complete_cmd(cmd); return 0; -- cgit v0.10.2 From 0f5e2ec46dd64579c0770f3822764f94db4fa465 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 18 Jan 2014 09:32:56 +0000 Subject: target/file: Add DIF protection init/format support This patch adds support for DIF protection init/format support into the FILEIO backend. It involves using a seperate $FILE.protection for storing PI that is opened via fd_init_prot() using the common pi_prot_type attribute. The actual formatting of the protection is done via fd_format_prot() using the common pi_prot_format attribute, that will populate the initial PI data based upon the currently configured pi_prot_type. Based on original FILEIO code from Sagi. v1 changes: - Fix sparse warnings in fd_init_format_buf (Fengguang) Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0e34cda..119d519 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -700,6 +700,140 @@ static sector_t fd_get_blocks(struct se_device *dev) dev->dev_attrib.block_size); } +static int fd_init_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_file, *file = fd_dev->fd_file; + struct inode *inode; + int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; + char buf[FD_MAX_DEV_PROT_NAME]; + + if (!file) { + pr_err("Unable to locate fd_dev->fd_file\n"); + return -ENODEV; + } + + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + pr_err("FILEIO Protection emulation only supported on" + " !S_ISBLK\n"); + return -ENOSYS; + } + + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) + flags &= ~O_DSYNC; + + snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", + fd_dev->fd_dev_name); + + prot_file = filp_open(buf, flags, 0600); + if (IS_ERR(prot_file)) { + pr_err("filp_open(%s) failed\n", buf); + ret = PTR_ERR(prot_file); + return ret; + } + fd_dev->fd_prot_file = prot_file; + + return 0; +} + +static void fd_init_format_buf(struct se_device *dev, unsigned char *buf, + u32 unit_size, u32 *ref_tag, u16 app_tag, + bool inc_reftag) +{ + unsigned char *p = buf; + int i; + + for (i = 0; i < unit_size; i += dev->prot_length) { + *((u16 *)&p[0]) = 0xffff; + *((__be16 *)&p[2]) = cpu_to_be16(app_tag); + *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag); + + if (inc_reftag) + (*ref_tag)++; + + p += dev->prot_length; + } +} + +static int fd_format_prot(struct se_device *dev) +{ + struct 
fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_fd = fd_dev->fd_prot_file; + sector_t prot_length, prot; + unsigned char *buf; + loff_t pos = 0; + u32 ref_tag = 0; + int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; + int rc, ret = 0, size, len; + bool inc_reftag = false; + + if (!dev->dev_attrib.pi_prot_type) { + pr_err("Unable to format_prot while pi_prot_type == 0\n"); + return -ENODEV; + } + if (!prot_fd) { + pr_err("Unable to locate fd_dev->fd_prot_file\n"); + return -ENODEV; + } + + switch (dev->dev_attrib.pi_prot_type) { + case TARGET_DIF_TYPE3_PROT: + ref_tag = 0xffffffff; + break; + case TARGET_DIF_TYPE2_PROT: + case TARGET_DIF_TYPE1_PROT: + inc_reftag = true; + break; + default: + break; + } + + buf = vzalloc(unit_size); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + + prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; + size = prot_length; + + pr_debug("Using FILEIO prot_length: %llu\n", + (unsigned long long)prot_length); + + for (prot = 0; prot < prot_length; prot += unit_size) { + + fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff, + inc_reftag); + + len = min(unit_size, size); + + rc = kernel_write(prot_fd, buf, len, pos); + if (rc != len) { + pr_err("vfs_write to prot file failed: %d\n", rc); + ret = -ENODEV; + goto out; + } + pos += len; + size -= len; + } + +out: + vfree(buf); + return ret; +} + +static void fd_free_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + + if (!fd_dev->fd_prot_file) + return; + + filp_close(fd_dev->fd_prot_file, NULL); + fd_dev->fd_prot_file = NULL; +} + static struct sbc_ops fd_sbc_ops = { .execute_rw = fd_execute_rw, .execute_sync_cache = fd_execute_sync_cache, @@ -730,6 +864,9 @@ static struct se_subsystem_api fileio_template = { .show_configfs_dev_params = fd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = fd_get_blocks, + .init_prot = fd_init_prot, + .format_prot = fd_format_prot, + .free_prot = fd_free_prot, }; static int __init fileio_module_init(void) diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5b..583691e 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,6 +4,7 @@ #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 +#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16 #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 @@ -15,6 +16,8 @@ #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 +#define FDBD_FORMAT_UNIT_SIZE 2048 + struct fd_dev { struct se_device dev; @@ -29,6 +32,7 @@ struct fd_dev { u32 fd_block_size; unsigned long long fd_dev_size; struct file *fd_file; + struct file *fd_prot_file; /* FILEIO HBA device is connected to */ struct fd_host *fd_host; } ____cacheline_aligned; -- cgit v0.10.2 From 42201b557471f2fef2e9e028b50a773d99ffc401 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 18 Jan 2014 09:33:48 +0000 Subject: target/file: Add DIF protection support to fd_execute_rw This patch adds support for DIF protection into fd_execute_rw() code for WRITE/READ I/O using sbc_dif_verify_[write,read]() logic. It adds fd_do_prot_rw() for handling interface with FILEIO PI, and uses a locally allocated fd_prot->prot_buf + fd_prot->prot_sg for interacting with SBC DIF verify emulation code. Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 119d519..aaba7c5 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev) kfree(fd_dev); } +static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, + int is_write) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *dev = FD_DEV(se_dev); + struct file *prot_fd = dev->fd_prot_file; + struct scatterlist *sg; + loff_t pos = (cmd->t_task_lba * se_dev->prot_length); + unsigned char *buf; + u32 prot_size, len, size; + int rc, ret = 1, i; + + prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * + se_dev->prot_length; + + if (!is_write) { + fd_prot->prot_buf = vzalloc(prot_size); + if (!fd_prot->prot_buf) { + pr_err("Unable to allocate fd_prot->prot_buf\n"); + return -ENOMEM; + } + buf = fd_prot->prot_buf; + + fd_prot->prot_sg_nents = cmd->t_prot_nents; + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * + fd_prot->prot_sg_nents, GFP_KERNEL); + if (!fd_prot->prot_sg) { + pr_err("Unable to allocate fd_prot->prot_sg\n"); + vfree(fd_prot->prot_buf); + return -ENOMEM; + } + size = prot_size; + + for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { + + len = min_t(u32, PAGE_SIZE, size); + sg_set_buf(sg, buf, len); + size -= len; + buf += len; + } + } + + if (is_write) { + rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); + if (rc < 0 || prot_size != rc) { + pr_err("kernel_write() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } else { + rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); + if (rc < 0) { + pr_err("kernel_read() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } + + if (is_write || ret < 0) { + kfree(fd_prot->prot_sg); + vfree(fd_prot->prot_buf); + } + + return ret; +} + static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, int is_write) { @@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { struct se_device *dev = cmd->se_dev; + struct fd_prot fd_prot; + sense_reason_t rc; int ret = 0; /* @@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * physical memory addresses to struct iovec virtual memory. 
*/ if (data_direction == DMA_FROM_DEVICE) { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 0); + + if (ret > 0 && cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + } } else { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 1); /* * Perform implicit vfs_fsync_range() for fd_do_writev() ops @@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, vfs_fsync_range(fd_dev->fd_file, start, end, 1); } + + if (ret > 0 && cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, true); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } } - if (ret < 0) + if (ret < 0) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (ret) target_complete_cmd(cmd, SAM_STAT_GOOD); diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 583691e..97e5e7dd 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -18,6 +18,11 @@ #define FDBD_HAS_BUFFERED_IO_WCE 0x04 #define FDBD_FORMAT_UNIT_SIZE 2048 +struct fd_prot { + unsigned char *prot_buf; + struct scatterlist *prot_sg; + u32 prot_sg_nents; +}; struct fd_dev { struct se_device dev; -- cgit v0.10.2 From 4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 7 Jan 2014 22:40:27 +0000 Subject: target/rd: Refactor rd_build_device_space + rd_release_device_space This patch refactors rd_build_device_space() + rd_release_device_space() into rd_allocate_sgl_table() + rd_release_sgl_table() so that they may be used separately for setup + release of protection information scatterlists. Also add explicit memset of pages within rd_allocate_sgl_table() based upon passed 'init_payload' value. v2 changes: - Drop unused sg_table from rd_release_device_space (Wei) Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 4ffe5f2..e9fa879 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba) hba->hba_ptr = NULL; } -/* rd_release_device_space(): - * - * - */ -static void rd_release_device_space(struct rd_dev *rd_dev) +static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 sg_table_count) { - u32 i, j, page_count = 0, sg_per_table; - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; + u32 i, j, page_count = 0, sg_per_table; - if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) - return; - - sg_table = rd_dev->sg_table_array; - - for (i = 0; i < rd_dev->sg_table_count; i++) { + for (i = 0; i < sg_table_count; i++) { sg = sg_table[i].sg_table; sg_per_table = sg_table[i].rd_sg_count; @@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev) page_count++; } } - kfree(sg); } + kfree(sg_table); + return page_count; +} + +static void rd_release_device_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, + rd_dev->sg_table_count); + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" " Device ID: %u, pages %u in %u tables total bytes %lu\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); - kfree(sg_table); rd_dev->sg_table_array = NULL; rd_dev->sg_table_count = 0; } @@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev) * * */ -static int rd_build_device_space(struct rd_dev *rd_dev) +static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 total_sg_needed, unsigned char init_payload) { - u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; + u32 i = 0, j, page_offset = 0, sg_per_table; u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)); - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; - - if (rd_dev->rd_page_count <= 0) { - pr_err("Illegal page count: %u for Ramdisk device\n", - rd_dev->rd_page_count); - return -EINVAL; - } - - /* Don't need backing pages for NULLIO */ - if (rd_dev->rd_flags & RDF_NULLIO) - return 0; - - total_sg_needed = rd_dev->rd_page_count; - - sg_tables = (total_sg_needed / max_sg_per_table) + 1; - - sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); - if (!sg_table) { - pr_err("Unable to allocate memory for Ramdisk" - " scatterlist tables\n"); - return -ENOMEM; - } - - rd_dev->sg_table_array = sg_table; - rd_dev->sg_table_count = sg_tables; + unsigned char *p; while (total_sg_needed) { sg_per_table = (total_sg_needed > max_sg_per_table) ? 
@@ -186,16 +166,59 @@ static int rd_build_device_space(struct rd_dev *rd_dev) } sg_assign_page(&sg[j], pg); sg[j].length = PAGE_SIZE; + + p = kmap(pg); + memset(p, init_payload, PAGE_SIZE); + kunmap(pg); } page_offset += sg_per_table; total_sg_needed -= sg_per_table; } + return 0; +} + +static int rd_build_device_space(struct rd_dev *rd_dev) +{ + struct rd_dev_sg_table *sg_table; + u32 sg_tables, total_sg_needed; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_page_count <= 0) { + pr_err("Illegal page count: %u for Ramdisk device\n", + rd_dev->rd_page_count); + return -EINVAL; + } + + /* Don't need backing pages for NULLIO */ + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + + total_sg_needed = rd_dev->rd_page_count; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_table_array = sg_table; + rd_dev->sg_table_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); + if (rc) + return rc; + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" - " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, - rd_dev->rd_dev_id, rd_dev->rd_page_count, - rd_dev->sg_table_count); + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count); return 0; } -- cgit v0.10.2 From d7e8eb5d9216c6a4f963aa6d07e29680af17d739 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 7 Jan 2014 23:24:14 +0000 Subject: target/rd: Add support for protection SGL setup + release This patch adds rd_build_prot_space() + rd_release_prot_space() logic to setup + release protection information scatterlists. It also adds rd_init_prot() + rd_free_prot() se_subsystem_api callbacks used by target core code for setup + release of protection information. v2 changes: - Drop unused sg_table from rd_release_prot_space (Wei) - Drop rd_release_prot_space call from rd_free_device Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e9fa879..660961d 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -223,6 +223,61 @@ static int rd_build_device_space(struct rd_dev *rd_dev) return 0; } +static void rd_release_prot_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, + rd_dev->sg_prot_count); + + pr_debug("CORE_RD[%u] - Released protection space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + rd_dev->sg_prot_array = NULL; + rd_dev->sg_prot_count = 0; +} + +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length) +{ + struct rd_dev_sg_table *sg_table; + u32 total_sg_needed, sg_tables; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + + total_sg_needed = rd_dev->rd_page_count / prot_length; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk protection" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_prot_array = sg_table; + rd_dev->sg_prot_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); + if (rc) + return rc; + + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); + + return 0; +} + static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name) { struct rd_dev *rd_dev; @@ -479,6 +534,23 @@ static sector_t rd_get_blocks(struct se_device *dev) return blocks_long; } +static int rd_init_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + if (!dev->dev_attrib.pi_prot_type) + return 0; + + return rd_build_prot_space(rd_dev, dev->prot_length); +} + +static void rd_free_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + rd_release_prot_space(rd_dev); +} + static struct sbc_ops rd_sbc_ops = { .execute_rw = rd_execute_rw, }; @@ -504,6 +576,8 @@ static struct se_subsystem_api rd_mcp_template = { .show_configfs_dev_params = rd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = rd_get_blocks, + .init_prot = rd_init_prot, + .free_prot = rd_free_prot, }; int __init rd_module_init(void) diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 1789d1e..cc46a6a 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -33,8 +33,12 @@ struct rd_dev { u32 rd_page_count; /* Number of SG tables in sg_table_array */ u32 sg_table_count; + /* Number of SG tables in sg_prot_array */ + u32 sg_prot_count; /* Array of rd_dev_sg_table_t containing scatterlists */ struct rd_dev_sg_table *sg_table_array; + /* Array of rd_dev_sg_table containing protection scatterlists */ + struct rd_dev_sg_table *sg_prot_array; /* Ramdisk HBA device is connected to */ struct rd_host *rd_host; } ____cacheline_aligned; -- cgit v0.10.2 From 
6e611119b16c73a6e1c450d00deb4d7eef853336 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 8 Jan 2014 12:06:30 +0000 Subject: target/rd: Add DIF protection into rd_execute_rw This patch adds support for DIF protection into rd_execute_rw() code for WRITE/READ I/O using sbc_dif_verify_[write,read]() logic. It also adds rd_get_prot_table() for locating protection SGLs associated with the ramdisk backend device. v2 changes: - Make rd_execute_rw() use a u32 sectors count instead of sector_t - Drop SCF_PROT usage Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 660961d..66a5aba 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -356,6 +356,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } +static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) +{ + struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + + i = page / sg_per_table; + if (i < rd_dev->sg_prot_count) { + sg_table = &rd_dev->sg_prot_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + static sense_reason_t rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) @@ -370,6 +390,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, u32 rd_page; u32 src_len; u64 tmp; + sense_reason_t rc; if (dev->rd_flags & RDF_NULLIO) { target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -392,6 +413,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, data_direction == DMA_FROM_DEVICE ? "Read" : "Write", cmd->t_task_lba, rd_size, rd_page, rd_offset); + if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + src_len = PAGE_SIZE - rd_offset; sg_miter_start(&m, sgl, sgl_nents, data_direction == DMA_FROM_DEVICE ? 
@@ -453,6 +496,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } sg_miter_stop(&m); + if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } -- cgit v0.10.2 From 59dedde2b67130bf81a2df55df464909c7459df5 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 23 Dec 2013 20:39:23 +0000 Subject: tcm_loop: Enable DIF/DIX modes in SCSI host LLD This patch updates tcm_loop_driver_probe() to set protection information using scsi_host_set_prot() and scsi_host_set_guard(), which currently enables all modes of DIF/DIX protection, minus DIX TYPE0. Also, update tcm_loop_submission_work() to pass struct scsi_cmnd related protection into target_submit_cmd_map_sgls() during CDB dispatch. Reviewed-by: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 112b795..fadad7c 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -217,7 +217,8 @@ static void tcm_loop_submission_work(struct work_struct *work) scsi_bufflen(sc), tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), - sgl_bidi, sgl_bidi_count, NULL, 0); + sgl_bidi, sgl_bidi_count, + scsi_prot_sglist(sc), scsi_prot_sg_count(sc)); if (rc < 0) { set_host_byte(sc, DID_NO_CONNECT); goto out_done; @@ -462,7 +463,7 @@ static int tcm_loop_driver_probe(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; - int error; + int error, host_prot; tl_hba = to_tcm_loop_hba(dev); @@ -486,6 +487,13 @@ static int tcm_loop_driver_probe(struct device *dev) sh->max_channel = 0; sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(sh, host_prot); + scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); + error = scsi_add_host(sh, &tl_hba->dev); if (error) { pr_err("%s: scsi_add_host failed\n", __func__); -- cgit v0.10.2 From eb6ab13267be87dcad3e611500b7cb404ed4479c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Jan 2014 18:40:49 +0200 Subject: IB/isert: separate connection protection domains and dma MRs It is more correct to separate connections' protection domains and dma_mr handles. Protection information support requires us to do so. 
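In outline, the per-connection ownership model introduced here is the following (a minimal sketch; the helper name isert_conn_setup_pd_mr is illustrative only, and the error unwinding is abbreviated relative to the actual patch below):

static int isert_conn_setup_pd_mr(struct isert_conn *isert_conn)
{
	/* each connection now owns its own protection domain... */
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd))
		return PTR_ERR(isert_conn->conn_pd);

	/* ...and its own DMA MR, allocated against that PD */
	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ib_dealloc_pd(isert_conn->conn_pd);
		return PTR_ERR(isert_conn->conn_mr);
	}
	return 0;
}

Teardown releases the two in reverse order (ib_dereg_mr() then ib_dealloc_pd()), which is exactly what isert_connect_release() does after this patch.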
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6be57c3..3dd2427 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -248,13 +248,6 @@ isert_create_device_ib_res(struct isert_device *device) } cq_desc = device->cq_desc; - device->dev_pd = ib_alloc_pd(ib_dev); - if (IS_ERR(device->dev_pd)) { - ret = PTR_ERR(device->dev_pd); - pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret); - goto out_cq_desc; - } - for (i = 0; i < device->cqs_used; i++) { cq_desc[i].device = device; cq_desc[i].cq_index = i; @@ -282,13 +275,6 @@ isert_create_device_ib_res(struct isert_device *device) goto out_cq; } - device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(device->dev_mr)) { - ret = PTR_ERR(device->dev_mr); - pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret); - goto out_cq; - } - return 0; out_cq: @@ -304,9 +290,6 @@ out_cq: ib_destroy_cq(device->dev_tx_cq[j]); } } - ib_dealloc_pd(device->dev_pd); - -out_cq_desc: kfree(device->cq_desc); return ret; @@ -329,8 +312,6 @@ isert_free_device_ib_res(struct isert_device *device) device->dev_tx_cq[i] = NULL; } - ib_dereg_mr(device->dev_mr); - ib_dealloc_pd(device->dev_pd); kfree(device->cq_desc); } @@ -437,7 +418,7 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn) goto err; } - fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd, + fr_desc->data_mr = ib_alloc_fast_reg_mr(isert_conn->conn_pd, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_mr)) { pr_err("Failed to allocate frmr err=%ld\n", @@ -546,8 +527,22 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) } isert_conn->conn_device = device; - isert_conn->conn_pd = device->dev_pd; - isert_conn->conn_mr = device->dev_mr; + isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); + if (IS_ERR(isert_conn->conn_pd)) { + ret = PTR_ERR(isert_conn->conn_pd); + pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", + isert_conn, ret); + goto out_pd; + } + + isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(isert_conn->conn_mr)) { + ret = PTR_ERR(isert_conn->conn_mr); + pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", + isert_conn, ret); + goto out_mr; + } if (device->use_frwr) { ret = isert_conn_create_frwr_pool(isert_conn); @@ -573,6 +568,10 @@ out_conn_dev: if (device->use_frwr) isert_conn_free_frwr_pool(isert_conn); out_frwr: + ib_dereg_mr(isert_conn->conn_mr); +out_mr: + ib_dealloc_pd(isert_conn->conn_pd); +out_pd: isert_device_try_release(device); out_rsp_dma_map: ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, @@ -611,6 +610,9 @@ isert_connect_release(struct isert_conn *isert_conn) isert_free_rx_descriptors(isert_conn); rdma_destroy_id(isert_conn->conn_cm_id); + ib_dereg_mr(isert_conn->conn_mr); + ib_dealloc_pd(isert_conn->conn_pd); + if (isert_conn->login_buf) { ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 691f90f..dec74d4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -144,8 +144,6 @@ struct isert_device { int refcount; int cq_active_qps[ISERT_MAX_CQ]; struct ib_device *ib_device; - struct ib_pd *dev_pd; - struct ib_mr *dev_mr; struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 
struct isert_cq_desc *cq_desc; -- cgit v0.10.2 From a3a5a82627492c8947d8866ddf421e9248088466 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Jan 2014 18:40:50 +0200 Subject: IB/isert: Avoid frwr notation, use fastreg Use fast registration lingo. Fast registration will also incorporate signature/DIF registration. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3dd2427..295d2be 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -47,10 +47,10 @@ static int isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr); static void -isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); +isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); static int -isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, - struct isert_rdma_wr *wr); +isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct isert_rdma_wr *wr); static void isert_qp_event_callback(struct ib_event *e, void *context) @@ -225,11 +225,11 @@ isert_create_device_ib_res(struct isert_device *device) /* asign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - device->use_frwr = 1; - device->reg_rdma_mem = isert_reg_rdma_frwr; - device->unreg_rdma_mem = isert_unreg_rdma_frwr; + device->use_fastreg = 1; + device->reg_rdma_mem = isert_reg_rdma; + device->unreg_rdma_mem = isert_unreg_rdma; } else { - device->use_frwr = 0; + device->use_fastreg = 0; device->reg_rdma_mem = isert_map_rdma; device->unreg_rdma_mem = isert_unmap_cmd; } @@ -237,9 +237,10 @@ isert_create_device_ib_res(struct isert_device *device) device->cqs_used = min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors); device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); - pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n", + pr_debug("Using %d CQs, device %s supports %d vectors support " + "Fast registration %d\n", device->cqs_used, device->ib_device->name, - device->ib_device->num_comp_vectors, device->use_frwr); + device->ib_device->num_comp_vectors, device->use_fastreg); device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * device->cqs_used, GFP_KERNEL); if (!device->cq_desc) { @@ -367,18 +368,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) } static void -isert_conn_free_frwr_pool(struct isert_conn *isert_conn) +isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc, *tmp; int i = 0; - if (list_empty(&isert_conn->conn_frwr_pool)) + if (list_empty(&isert_conn->conn_fr_pool)) return; - pr_debug("Freeing conn %p frwr pool", isert_conn); + pr_debug("Freeing conn %p fastreg pool", isert_conn); list_for_each_entry_safe(fr_desc, tmp, - &isert_conn->conn_frwr_pool, list) { + &isert_conn->conn_fr_pool, list) { list_del(&fr_desc->list); ib_free_fast_reg_page_list(fr_desc->data_frpl); ib_dereg_mr(fr_desc->data_mr); @@ -386,20 +387,20 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn) ++i; } - if (i < isert_conn->conn_frwr_pool_size) + if (i < isert_conn->conn_fr_pool_size) pr_warn("Pool still has %d regions registered\n", - isert_conn->conn_frwr_pool_size - i); + isert_conn->conn_fr_pool_size - i); } static int -isert_conn_create_frwr_pool(struct isert_conn *isert_conn) +isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc; 
struct isert_device *device = isert_conn->conn_device; int i, ret; - INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); - isert_conn->conn_frwr_pool_size = 0; + INIT_LIST_HEAD(&isert_conn->conn_fr_pool); + isert_conn->conn_fr_pool_size = 0; for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); if (!fr_desc) { @@ -431,17 +432,17 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn) fr_desc, fr_desc->data_frpl->page_list); fr_desc->valid = true; - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); - isert_conn->conn_frwr_pool_size++; + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); + isert_conn->conn_fr_pool_size++; } - pr_debug("Creating conn %p frwr pool size=%d", - isert_conn, isert_conn->conn_frwr_pool_size); + pr_debug("Creating conn %p fastreg pool size=%d", + isert_conn, isert_conn->conn_fr_pool_size); return 0; err: - isert_conn_free_frwr_pool(isert_conn); + isert_conn_free_fastreg_pool(isert_conn); return ret; } @@ -544,11 +545,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } - if (device->use_frwr) { - ret = isert_conn_create_frwr_pool(isert_conn); + if (device->use_fastreg) { + ret = isert_conn_create_fastreg_pool(isert_conn); if (ret) { - pr_err("Conn: %p failed to create frwr_pool\n", isert_conn); - goto out_frwr; + pr_err("Conn: %p failed to create fastreg pool\n", + isert_conn); + goto out_fastreg; } } @@ -565,9 +567,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) return 0; out_conn_dev: - if (device->use_frwr) - isert_conn_free_frwr_pool(isert_conn); -out_frwr: + if (device->use_fastreg) + isert_conn_free_fastreg_pool(isert_conn); +out_fastreg: ib_dereg_mr(isert_conn->conn_mr); out_mr: ib_dealloc_pd(isert_conn->conn_pd); @@ -595,8 +597,8 @@ isert_connect_release(struct isert_conn *isert_conn) pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - if (device && device->use_frwr) - isert_conn_free_frwr_pool(isert_conn); + if (device && device->use_fastreg) + isert_conn_free_fastreg_pool(isert_conn); if (isert_conn->conn_qp) { cq_index = ((struct isert_cq_desc *) @@ -1394,25 +1396,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) } static void -isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) +isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; struct ib_device *ib_dev = isert_conn->conn_cm_id->device; LIST_HEAD(unmap_list); - pr_debug("unreg_frwr_cmd: %p\n", isert_cmd); + pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); if (wr->fr_desc) { - pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n", + pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", isert_cmd, wr->fr_desc); spin_lock_bh(&isert_conn->conn_lock); - list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool); + list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); spin_unlock_bh(&isert_conn->conn_lock); wr->fr_desc = NULL; } if (wr->sge) { - pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd); + pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -2224,8 +2226,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, } static int -isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, - struct isert_rdma_wr *wr) +isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct isert_rdma_wr *wr) { struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); @@ -2303,7 +2305,7 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, wr->fr_desc = NULL; } else { spin_lock_irqsave(&isert_conn->conn_lock, flags); - fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, + fr_desc = list_first_entry(&isert_conn->conn_fr_pool, struct fast_reg_descriptor, list); list_del(&fr_desc->list); spin_unlock_irqrestore(&isert_conn->conn_lock, flags); @@ -2312,7 +2314,7 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, ib_sge, offset, data_len); if (ret) { - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); goto unmap_sg; } } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index dec74d4..708a069 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -119,9 +119,9 @@ struct isert_conn { wait_queue_head_t conn_wait; wait_queue_head_t conn_wait_comp_err; struct kref conn_kref; - struct list_head conn_frwr_pool; - int conn_frwr_pool_size; - /* lock to protect frwr_pool */ + struct list_head conn_fr_pool; + int conn_fr_pool_size; + /* lock to protect fastreg pool */ spinlock_t conn_lock; #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; @@ -139,7 +139,7 @@ struct isert_cq_desc { }; struct isert_device { - int use_frwr; + int use_fastreg; int cqs_used; int refcount; int cq_active_qps[ISERT_MAX_CQ]; -- cgit v0.10.2 From dc87a90f5d61d7f01cfb63d92d5eaa27718e5b19 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Jan 2014 18:40:51 +0200 Subject: IB/isert: Move fastreg descriptor creation to a function This routine may be called to create fast registration descriptors both for data and for integrity buffers. This patch does not change any functionality. 
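To illustrate the intended reuse (a sketch only; the pi_desc descriptor for integrity buffers is hypothetical here and not part of this patch), the extracted helper lets one allocation path serve both purposes:

	/* data descriptor, exactly as the pool-creation loop below does */
	ret = isert_create_fr_desc(device->ib_device, isert_conn->conn_pd,
				   fr_desc);
	if (ret)
		goto err;

	/* hypothetical follow-on: the same helper could populate a
	 * descriptor used for protection/integrity buffers */
	ret = isert_create_fr_desc(device->ib_device, isert_conn->conn_pd,
				   pi_desc);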
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 295d2be..9ef9193 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -393,6 +393,33 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) } static int +isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, + struct fast_reg_descriptor *fr_desc) +{ + fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, + ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(fr_desc->data_frpl)) { + pr_err("Failed to allocate data frpl err=%ld\n", + PTR_ERR(fr_desc->data_frpl)); + return PTR_ERR(fr_desc->data_frpl); + } + + fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(fr_desc->data_mr)) { + pr_err("Failed to allocate data frmr err=%ld\n", + PTR_ERR(fr_desc->data_mr)); + ib_free_fast_reg_page_list(fr_desc->data_frpl); + return PTR_ERR(fr_desc->data_mr); + } + pr_debug("Create fr_desc %p page_list %p\n", + fr_desc, fr_desc->data_frpl->page_list); + + fr_desc->valid = true; + + return 0; +} + +static int isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc; @@ -409,29 +436,14 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) goto err; } - fr_desc->data_frpl = - ib_alloc_fast_reg_page_list(device->ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(fr_desc->data_frpl)) { - pr_err("Failed to allocate fr_pg_list err=%ld\n", - PTR_ERR(fr_desc->data_frpl)); - ret = PTR_ERR(fr_desc->data_frpl); - goto err; - } - - fr_desc->data_mr = ib_alloc_fast_reg_mr(isert_conn->conn_pd, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(fr_desc->data_mr)) { - pr_err("Failed to allocate frmr err=%ld\n", - PTR_ERR(fr_desc->data_mr)); - ret = PTR_ERR(fr_desc->data_mr); - ib_free_fast_reg_page_list(fr_desc->data_frpl); + ret = isert_create_fr_desc(device->ib_device, + isert_conn->conn_pd, fr_desc); + if (ret) { + pr_err("Failed to create fastreg descriptor err=%d\n", + ret); goto err; } - pr_debug("Create fr_desc %p page_list %p\n", - fr_desc, fr_desc->data_frpl->page_list); - fr_desc->valid = true; list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); isert_conn->conn_fr_pool_size++; } -- cgit v0.10.2 From 9bd626e79df67b2ba3b0c91a4640ab7bca1af04d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Jan 2014 18:40:54 +0200 Subject: IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine This routine may help with protection registration as well. This patch does not change any functionality. 
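The new calling convention can be sketched as follows (illustrative, matching the hunks below): the caller now derives the scatterlist view, so the same routine can later be pointed at protection scatterlists just as easily as at data:

	/* caller-side view, data path shown; values as in isert_reg_rdma() */
	sg_off = offset / PAGE_SIZE;
	sg_start = &se_cmd->t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start, ib_sge,
				sg_nents, offset, data_len);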
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 9ef9193..421182b 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2169,26 +2169,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, static int isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, - struct isert_cmd *isert_cmd, struct isert_conn *isert_conn, - struct ib_sge *ib_sge, u32 offset, unsigned int data_len) + struct isert_conn *isert_conn, struct scatterlist *sg_start, + struct ib_sge *ib_sge, u32 sg_nents, u32 offset, + unsigned int data_len) { - struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; struct ib_device *ib_dev = isert_conn->conn_cm_id->device; - struct scatterlist *sg_start; - u32 sg_off, page_off; struct ib_send_wr fr_wr, inv_wr; struct ib_send_wr *bad_wr, *wr = NULL; + int ret, pagelist_len; + u32 page_off; u8 key; - int ret, sg_nents, pagelist_len; - sg_off = offset / PAGE_SIZE; - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; - sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off, - ISCSI_ISER_SG_TABLESIZE); + sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE); page_off = offset % PAGE_SIZE; - pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n", - isert_cmd, fr_desc, sg_nents, sg_off, offset); + pr_debug("Use fr_desc %p sg_nents %d offset %u\n", + fr_desc, sg_nents, offset); pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, &fr_desc->data_frpl->page_list[0]); @@ -2257,9 +2253,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; } else { - sg_off = cmd->write_data_done / PAGE_SIZE; - data_left = se_cmd->data_length - cmd->write_data_done; offset = cmd->write_data_done; + sg_off = offset / PAGE_SIZE; + data_left = se_cmd->data_length - cmd->write_data_done; isert_cmd->tx_desc.isert_cmd = isert_cmd; } @@ -2323,8 +2319,8 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, spin_unlock_irqrestore(&isert_conn->conn_lock, flags); wr->fr_desc = fr_desc; - ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, - ib_sge, offset, data_len); + ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start, + ib_sge, sg_nents, offset, data_len); if (ret) { list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); goto unmap_sg; } } -- cgit v0.10.2 From 2930d441592c16b83237ad015628cba5f41cc095 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 14 Jan 2014 20:39:56 -0800 Subject: qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure This patch fixes a possible scsi_host reference leak in qlt_lport_register(), when a non-zero return from the passed (*callback) does not drop the local reference via scsi_host_put() before returning. This currently does not affect existing tcm_qla2xxx code as the passed callback will never fail, but fix this up regardless for future code. 
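The fix restores the usual get/put pairing (a sketch for context; the reference is assumed to be taken with scsi_host_get() earlier in qlt_lport_register()'s host lookup loop):

	if (!scsi_host_get(host))
		continue;	/* reference taken while scanning hosts */

	/* ... lport setup, then hand off to the fabric callback ... */
	rc = (*callback)(vha);
	if (rc != 0) {
		ha->tgt.tgt_ops = NULL;
		ha->tgt.target_lport_ptr = NULL;
		scsi_host_put(host);	/* new: drop the ref on failure */
	}
	mutex_unlock(&qla_tgt_mutex);
	return rc;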
Cc: Chad Dupuis Signed-off-by: Nicholas Bellinger diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 5964800..0b6c5f9 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4290,6 +4290,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, if (rc != 0) { ha->tgt.tgt_ops = NULL; ha->tgt.target_lport_ptr = NULL; + scsi_host_put(host); } mutex_unlock(&qla_tgt_mutex); return rc; -- cgit v0.10.2 From 0e8cd71ceca4c15ef544e3af01248bc869c28d8f Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Tue, 14 Jan 2014 20:40:38 -0800 Subject: qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO. Signed-off-by: Sawan Chandak Signed-off-by: Quinn Tran Signed-off-by: Saurav Kashyap Signed-off-by: Nicholas Bellinger diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 5f174b8..9c412b5 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1994,6 +1994,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->flags.delete_progress = 1; + qlt_remove_target(ha, vha); + fc_remove_host(vha->host); scsi_remove_host(vha->host); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 93db74e..e7be8a5 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2750,6 +2750,13 @@ struct qlfc_fw { uint32_t len; }; +struct scsi_qlt_host { + void *target_lport_ptr; + struct mutex tgt_mutex; + struct mutex tgt_host_action_mutex; + struct qla_tgt *qla_tgt; +}; + struct qlt_hw_data { /* Protected by hw lock */ uint32_t enable_class_2:1; @@ -2765,15 +2772,11 @@ struct qlt_hw_data { uint32_t __iomem *atio_q_in; uint32_t __iomem *atio_q_out; - void *target_lport_ptr; struct qla_tgt_func_tmpl *tgt_ops; - struct qla_tgt *qla_tgt; struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS]; uint16_t current_handle; struct qla_tgt_vp_map *tgt_vp_map; - struct mutex tgt_mutex; - struct mutex tgt_host_action_mutex; int saved_set; uint16_t saved_exchange_count; @@ -3441,6 +3444,7 @@ typedef struct scsi_qla_host { #define VP_ERR_FAB_LOGOUT 4 #define VP_ERR_ADAP_NORESOURCES 5 struct qla_hw_data *hw; + struct scsi_qlt_host vha_tgt; struct req_que *req; int fw_heartbeat_counter; int seconds_since_last_heartbeat; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0b6c5f9..34ed246 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -589,7 +589,7 @@ static struct qla_tgt_sess *qlt_create_sess( /* Check to avoid double sessions */ spin_lock_irqsave(&ha->hardware_lock, flags); - list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, + list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, sess_list_entry) { if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, @@ -626,7 +626,7 @@ static struct qla_tgt_sess *qlt_create_sess( return NULL; } - sess->tgt = ha->tgt.qla_tgt; + sess->tgt = vha->vha_tgt.qla_tgt; sess->vha = vha; sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; @@ -634,7 +634,7 @@ static struct qla_tgt_sess *qlt_create_sess( ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", - sess, ha->tgt.qla_tgt); + sess, vha->vha_tgt.qla_tgt); be_sid[0] = sess->s_id.b.domain; be_sid[1] = sess->s_id.b.area; @@ -661,8 +661,8 @@ static struct qla_tgt_sess *qlt_create_sess( memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 
spin_lock_irqsave(&ha->hardware_lock, flags); - list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); - ha->tgt.qla_tgt->sess_count++; + list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); + vha->vha_tgt.qla_tgt->sess_count++; spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, @@ -681,7 +681,7 @@ static struct qla_tgt_sess *qlt_create_sess( void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; @@ -691,6 +691,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (!tgt || (fcport->port_type != FCT_INITIATOR)) return; + if (qla_ini_mode_enabled(vha)) + return; + spin_lock_irqsave(&ha->hardware_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -700,9 +703,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_create_sess(vha, fcport, false); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); } else { @@ -738,7 +741,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; @@ -805,12 +808,12 @@ void qlt_stop_phase1(struct qla_tgt *tgt) * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. * Lock is needed, because we still can get an incoming packet. 
*/ - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt, true); spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); flush_delayed_work(&tgt->sess_del_work); @@ -844,20 +847,21 @@ EXPORT_SYMBOL(qlt_stop_phase1); void qlt_stop_phase2(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); unsigned long flags; if (tgt->tgt_stopped) { - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, "Already in tgt->tgt_stopped state\n"); dump_stack(); return; } - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, "Waiting for %d IRQ commands to complete (tgt %p)", tgt->irq_cmd_count, tgt); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); while (tgt->irq_cmd_count != 0) { spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -867,9 +871,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt) tgt->tgt_stop = 0; tgt->tgt_stopped = 1; spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", tgt); } EXPORT_SYMBOL(qlt_stop_phase2); @@ -877,14 +881,14 @@ EXPORT_SYMBOL(qlt_stop_phase2); /* Called from qlt_remove_target() -> qla2x00_remove_one() */ static void qlt_release(struct qla_tgt *tgt) { - struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = tgt->vha; - if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) + if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) qlt_stop_phase2(tgt); - ha->tgt.qla_tgt = NULL; + vha->vha_tgt.qla_tgt = NULL; - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, "Release of tgt %p finished\n", tgt); kfree(tgt); @@ -948,8 +952,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, return; } - if (ha->tgt.qla_tgt != NULL) - ha->tgt.qla_tgt->notify_ack_expected++; + if (vha->vha_tgt.qla_tgt != NULL) + vha->vha_tgt.qla_tgt->notify_ack_expected++; pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; @@ -1053,7 +1057,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, /* Other bytes are zero */ } - ha->tgt.qla_tgt->abts_resp_expected++; + vha->vha_tgt.qla_tgt->abts_resp_expected++; qla2x00_start_iocbs(vha, vha->req); } @@ -1205,7 +1209,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, "qla_target(%d): task abort for non-existant session\n", vha->vp_idx); - rc = qlt_sched_sess_work(ha->tgt.qla_tgt, + rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); if (rc != 0) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, @@ -2156,8 +2160,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, void *ctio) { struct qla_tgt_srr_ctio *sc; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_srr_imm *imm; tgt->ctio_srr_id++; @@ -2473,7 +2476,7 @@ static void qlt_do_work(struct work_struct *work) struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + 
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess = NULL; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; @@ -2506,10 +2509,10 @@ static void qlt_do_work(struct work_struct *work) goto out_term; } - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has an extra creation ref. */ - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); if (!sess) goto out_term; @@ -2575,8 +2578,7 @@ out_term: static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { @@ -2596,7 +2598,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; - cmd->tgt = ha->tgt.qla_tgt; + cmd->tgt = vha->vha_tgt.qla_tgt; cmd->vha = vha; INIT_WORK(&cmd->work, qlt_do_work); @@ -2722,7 +2724,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) uint32_t lun, unpacked_lun; int lun_size, fn; - tgt = ha->tgt.qla_tgt; + tgt = vha->vha_tgt.qla_tgt; lun = a->u.isp24.fcp_cmnd.lun; lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); @@ -2796,7 +2798,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, "qla_target(%d): task abort for unexisting " "session\n", vha->vp_idx); - return qlt_sched_sess_work(ha->tgt.qla_tgt, + return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); } @@ -2809,7 +2811,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha, static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { - struct qla_hw_data *ha = vha->hw; int res = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, @@ -2827,7 +2828,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, case ELS_PDISC: case ELS_ADISC: { - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); @@ -3201,8 +3202,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_tgt_srr_imm *imm; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_srr_ctio *sctio; tgt->imm_srr_id++; @@ -3312,7 +3312,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, case IMM_NTFY_LIP_LINK_REINIT: { - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, "qla_target(%d): LINK REINIT (loop %#x, " "subcode %x)\n", vha->vp_idx, @@ -3488,7 +3488,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, struct atio_from_isp *atio) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int rc; if (unlikely(tgt == NULL)) { @@ -3590,7 +3590,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, @@ -3793,7 +3793,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, uint16_t *mailbox) { 
struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int login_code; ql_dbg(ql_dbg_tgt, vha, 0xe039, @@ -3923,14 +3923,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, uint8_t *s_id) { - struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; retry: - global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); + global_resets = + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); rc = qla24xx_get_loop_id(vha, s_id, &loop_id); if (rc != 0) { @@ -3957,12 +3957,13 @@ retry: return NULL; if (global_resets != - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, "qla_target(%d): global reset during session discovery " "(counter was %d, new %d), retrying", vha->vp_idx, global_resets, - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); + atomic_read(&vha->vha_tgt. + qla_tgt->tgt_global_resets_count)); goto retry; } @@ -3997,10 +3998,10 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); if (!sess) @@ -4051,10 +4052,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); if (!sess) @@ -4140,9 +4141,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) } ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, - "Registering target for host %ld(%p)", base_vha->host_no, ha); + "Registering target for host %ld(%p).\n", base_vha->host_no, ha); - BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); + BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); if (!tgt) { @@ -4170,7 +4171,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); atomic_set(&tgt->tgt_global_resets_count, 0); - ha->tgt.qla_tgt = tgt; + base_vha->vha_tgt.qla_tgt = tgt; ql_dbg(ql_dbg_tgt, base_vha, 0xe067, "qla_target(%d): using 64 Bit PCI addressing", @@ -4191,16 +4192,16 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) /* Must be called under tgt_host_action_mutex */ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) { - if (!ha->tgt.qla_tgt) + if (!vha->vha_tgt.qla_tgt) return 0; mutex_lock(&qla_tgt_mutex); - list_del(&ha->tgt.qla_tgt->tgt_list_entry); + list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); mutex_unlock(&qla_tgt_mutex); ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", vha->host_no, ha); - qlt_release(ha->tgt.qla_tgt); + qlt_release(vha->vha_tgt.qla_tgt); return 0; } @@ -4254,9 +4255,6 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, if (!host) 
continue; - if (ha->tgt.tgt_ops != NULL) - continue; - if (!(host->hostt->supported_mode & MODE_TARGET)) continue; @@ -4285,11 +4283,11 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, * Setup passed parameters ahead of invoking callback */ ha->tgt.tgt_ops = qla_tgt_ops; - ha->tgt.target_lport_ptr = target_lport_ptr; + vha->vha_tgt.target_lport_ptr = target_lport_ptr; rc = (*callback)(vha); if (rc != 0) { ha->tgt.tgt_ops = NULL; - ha->tgt.target_lport_ptr = NULL; + vha->vha_tgt.target_lport_ptr = NULL; scsi_host_put(host); } mutex_unlock(&qla_tgt_mutex); @@ -4313,7 +4311,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha) /* * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data */ - ha->tgt.target_lport_ptr = NULL; + vha->vha_tgt.target_lport_ptr = NULL; ha->tgt.tgt_ops = NULL; /* * Release the Scsi_Host reference for the underlying qla2xxx host @@ -4375,8 +4373,9 @@ void qlt_enable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe069, @@ -4391,9 +4390,14 @@ qlt_enable_vha(struct scsi_qla_host *vha) qlt_set_mode(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); - qla2x00_wait_for_hba_online(vha); + if (vha->vp_idx) { + qla24xx_disable_vp(vha); + qla24xx_enable_vp(vha); + } else { + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); + qla2x00_wait_for_hba_online(base_vha); + } } EXPORT_SYMBOL(qlt_enable_vha); @@ -4406,7 +4410,7 @@ void qlt_disable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; if (!tgt) { @@ -4437,8 +4441,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) if (!qla_tgt_mode_enabled(vha)) return; - mutex_init(&ha->tgt.tgt_mutex); - mutex_init(&ha->tgt.tgt_host_action_mutex); + vha->vha_tgt.qla_tgt = NULL; + + mutex_init(&vha->vha_tgt.tgt_mutex); + mutex_init(&vha->vha_tgt.tgt_host_action_mutex); qlt_clear_mode(vha); @@ -4449,6 +4455,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) * assigning the value appropriately. */ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + + qlt_add_target(ha, vha); } void @@ -4767,8 +4775,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } - mutex_init(&ha->tgt.tgt_mutex); - mutex_init(&ha->tgt.tgt_host_action_mutex); + mutex_init(&base_vha->vha_tgt.tgt_mutex); + mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); qlt_clear_mode(base_vha); } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7eb19be..113ca95 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -53,16 +53,6 @@ struct workqueue_struct *tcm_qla2xxx_free_wq; struct workqueue_struct *tcm_qla2xxx_cmd_wq; -static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) -{ - return 1; -} - -static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) -{ - return 0; -} - /* * Parse WWN. 
* If strict, we require lower-case hex and colon separators to be sure @@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn( *wwnn = 0; /* count may include a LF at end of string */ - if (name[cnt-1] == '\n') + if (name[cnt-1] == '\n' || name[cnt-1] == 0) cnt--; /* validate we have enough characters for WWPN */ @@ -777,6 +767,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess) static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) { + if (!sess) + return; + assert_spin_locked(&sess->vha->hw->hardware_lock); kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); } @@ -957,7 +950,6 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( struct tcm_qla2xxx_lport *lport = container_of(se_wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long op; @@ -977,12 +969,12 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( atomic_set(&tpg->lport_tpg_enabled, 1); qlt_enable_vha(vha); } else { - if (!ha->tgt.qla_tgt) { - pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); + if (!vha->vha_tgt.qla_tgt) { + pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n"); return -ENODEV; } atomic_set(&tpg->lport_tpg_enabled, 0); - qlt_stop_phase1(ha->tgt.qla_tgt); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); } return count; @@ -1011,7 +1003,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) return ERR_PTR(-EINVAL); - if (!lport->qla_npiv_vp && (tpgt != 1)) { + if ((tpgt != 1)) { pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); return ERR_PTR(-ENOSYS); } @@ -1038,11 +1030,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( kfree(tpg); return NULL; } - /* - * Setup local TPG=1 pointer for non NPIV mode. - */ - if (lport->qla_npiv_vp == NULL) - lport->tpg_1 = tpg; + + lport->tpg_1 = tpg; return &tpg->se_tpg; } @@ -1053,19 +1042,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_lport *lport = tpg->lport; struct scsi_qla_host *vha = lport->qla_vha; - struct qla_hw_data *ha = vha->hw; /* * Call into qla2x_target.c LLD logic to shutdown the active * FC Nexuses and disable target mode operation for this qla_hw_data */ - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) - qlt_stop_phase1(ha->tgt.qla_tgt); + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) + qlt_stop_phase1(vha->vha_tgt.qla_tgt); core_tpg_deregister(se_tpg); /* * Clear local TPG=1 pointer for non NPIV mode. 
*/ - if (lport->qla_npiv_vp == NULL) lport->tpg_1 = NULL; kfree(tpg); @@ -1095,12 +1082,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( tpg->lport = lport; tpg->lport_tpgt = tpgt; + /* + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic + * NodeACLs + */ + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; } + lport->tpg_1 = tpg; return &tpg->se_tpg; } @@ -1111,13 +1108,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, const uint8_t *s_id) { - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; u32 key; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1221,13 +1217,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; struct tcm_qla2xxx_fc_loopid *fc_loopid; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1341,6 +1336,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); struct se_session *se_sess; struct se_node_acl *se_nacl; struct tcm_qla2xxx_lport *lport; @@ -1357,7 +1353,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) se_nacl = se_sess->se_node_acl; nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1391,7 +1387,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( unsigned char port_name[36]; unsigned long flags; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1455,7 +1451,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; - struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); @@ -1564,13 +1561,12 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; /* * Setup local pointer to vha, NPIV VP pointer (if present) and * vha->tcm_lport pointer */ - lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; + lport = (struct tcm_qla2xxx_lport *)vha->vha_tgt.target_lport_ptr; lport->qla_vha = vha; return 0; @@ -1621,7 +1617,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) 
struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; - struct qla_hw_data *ha = vha->hw; struct se_node_acl *node; u32 key = 0; @@ -1630,8 +1625,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) * shutdown of struct qla_tgt after the call to * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. */ - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) - qlt_stop_phase2(ha->tgt.qla_tgt); + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_stop_phase2(vha->vha_tgt.qla_tgt); qlt_lport_deregister(vha); @@ -1650,6 +1645,9 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( struct tcm_qla2xxx_lport *lport; u64 npiv_wwpn, npiv_wwnn; int ret; + struct scsi_qla_host *vha = NULL; + struct qla_hw_data *ha = NULL; + scsi_qla_host_t *base_vha = NULL; if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, &npiv_wwpn, &npiv_wwnn) < 0) @@ -1666,12 +1664,29 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); -/* FIXME: tcm_qla2xxx_npiv_make_lport */ - ret = -ENOSYS; + ret = tcm_qla2xxx_init_lport(lport); if (ret != 0) goto out; + ret = qlt_lport_register(&tcm_qla2xxx_template, npiv_wwpn, + tcm_qla2xxx_lport_register_cb, lport); + + if (ret != 0) + goto out_lport; + + vha = lport->qla_vha; + ha = vha->hw; + base_vha = pci_get_drvdata(ha->pdev); + + if (!qla_tgt_mode_enabled(base_vha)) { + ret = -EPERM; + goto out_lport; + } + return &lport->lport_wwn; +out_lport: + vfree(lport->lport_loopid_map); + btree_destroy32(&lport->lport_fcport_map); out: kfree(lport); return ERR_PTR(ret); @@ -1684,9 +1699,9 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) struct scsi_qla_host *vha = lport->qla_vha; struct Scsi_Host *sh = vha->host; /* - * Notify libfc that we want to release the lport->npiv_vport + * Notify libfc that we want to release the vha->fc_vport */ - fc_vport_terminate(lport->npiv_vport); + fc_vport_terminate(vha->fc_vport); scsi_host_put(sh); kfree(lport); @@ -1769,14 +1784,16 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, - .tpg_check_demo_mode = tcm_qla2xxx_check_false, - .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, - .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, - .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, + .tpg_check_prod_mode_write_protect = + tcm_qla2xxx_check_prod_write_protect, .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .put_session = tcm_qla2xxx_put_session, .shutdown_session = tcm_qla2xxx_shutdown_session, @@ -1871,7 +1888,8 @@ static int tcm_qla2xxx_register_configfs(void) * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl */ npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - 
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = + tcm_qla2xxx_tpg_attrs; npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 771f7b8..275d8b9 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -70,12 +70,8 @@ struct tcm_qla2xxx_lport { struct tcm_qla2xxx_fc_loopid *lport_loopid_map; /* Pointer to struct scsi_qla_host from qla2xxx LLD */ struct scsi_qla_host *qla_vha; - /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ - struct scsi_qla_host *qla_npiv_vp; /* Pointer to struct qla_tgt pointer */ struct qla_tgt lport_qla_tgt; - /* Pointer to struct fc_vport for NPIV vport from libfc */ - struct fc_vport *npiv_vport; /* Pointer to TPG=1 for non NPIV mode */ struct tcm_qla2xxx_tpg *tpg_1; /* Returned by tcm_qla2xxx_make_lport() */ -- cgit v0.10.2 From 49a47f2cafbe4ca3839f8ae99c6fdeffd5fcaf45 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 14 Jan 2014 20:38:58 -0800 Subject: qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport This patch changes qla2xxx qlt_lport_register() code to accept target_lport_ptr + npiv_wwpn + npiv_wwnn parameters, and updates tcm_qla2xxx to use the new tcm_qla2xxx_lport_register_npiv_cb() callback for invoking fc_vport_create() from configfs context via tcm_qla2xxx_npiv_make_lport() code. In order for this to work, the qlt_lport_register() callback is now called without holding qla_tgt_mutex, as the fc_vport creation process will call qlt_vport_create() -> qlt_add_target(), which already expects to acquire it. It enforces /sys/kernel/config/target/qla2xxx_npiv/$NPIV_WWPN naming in the following format: $PHYSICAL_WWPN@$NPIV_WWPN:$NPIV_WWNN and assumes the $PHYSICAL_WWPN in question has already been configured for target mode in non NPIV mode. Finally, it updates existing tcm_qla2xxx_lport_register_cb() logic to set up the non NPIV assignments that have now been moved out of qlt_lport_register() code. 
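(Editor's note: the $PHYSICAL_WWPN@$NPIV_WWPN:$NPIV_WWNN convention above is easiest to see in a small stand-alone sketch. The userspace C below is illustrative only: parse_wwn() is a simplified stand-in for the driver's tcm_qla2xxx_parse_wwn()/tcm_qla2xxx_npiv_parse_wwn() helpers, and the WWN values are invented.)

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Parse "xx:xx:xx:xx:xx:xx:xx:xx" (exactly 23 chars) into a 64-bit WWN. */
static int parse_wwn(const char *s, uint64_t *wwn)
{
	unsigned int b[8];
	int i;

	if (strlen(s) != 23 ||
	    sscanf(s, "%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x",
		   &b[0], &b[1], &b[2], &b[3],
		   &b[4], &b[5], &b[6], &b[7]) != 8)
		return -1;
	for (*wwn = 0, i = 0; i < 8; i++)
		*wwn = (*wwn << 8) | (b[i] & 0xff);
	return 0;
}

int main(void)
{
	/* $PHYSICAL_WWPN@$NPIV_WWPN:$NPIV_WWNN, values invented */
	char name[] = "21:00:00:24:ff:31:4c:48@"
		      "21:00:00:24:ff:31:4c:49:21:00:00:24:ff:31:4c:4a";
	uint64_t phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p = strchr(name, '@');

	if (!p)
		return 1;		/* missing '@' separator */
	*p++ = '\0';			/* split physical from NPIV half */

	/* Each colon-separated WWN is exactly 23 characters, so the
	 * WWPN:WWNN boundary in the NPIV half sits at offset 23. */
	if (strlen(p) != 47 || p[23] != ':')
		return 1;
	p[23] = '\0';

	if (parse_wwn(name, &phys_wwpn) || parse_wwn(p, &npiv_wwpn) ||
	    parse_wwn(p + 24, &npiv_wwnn))
		return 1;

	printf("phys 0x%016llx npiv wwpn 0x%016llx wwnn 0x%016llx\n",
	       (unsigned long long)phys_wwpn,
	       (unsigned long long)npiv_wwpn,
	       (unsigned long long)npiv_wwnn);
	return 0;
}

(The fixed 23-character width of a colon-separated WWN is what lets the NPIV half be split unambiguously, even though ':' also appears inside each WWN.)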
Cc: Sawan Chandak Cc: Quinn Tran Cc: Saurav Kashyap Signed-off-by: Nicholas Bellinger diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 34ed246..b596f8b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4235,8 +4235,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, * @callback: lport initialization callback for tcm_qla2xxx code * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data */ -int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, - int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) +int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, + u64 npiv_wwpn, u64 npiv_wwnn, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)) { struct qla_tgt *tgt; struct scsi_qla_host *vha; @@ -4259,7 +4260,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, continue; spin_lock_irqsave(&ha->hardware_lock, flags); - if (host->active_mode & MODE_TARGET) { + if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -4273,24 +4274,18 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, " qla2xxx scsi_host\n"); continue; } - qlt_lport_dump(vha, wwpn, b); + qlt_lport_dump(vha, phys_wwpn, b); if (memcmp(vha->port_name, b, WWN_SIZE)) { scsi_host_put(host); continue; } - /* - * Setup passed parameters ahead of invoking callback - */ - ha->tgt.tgt_ops = qla_tgt_ops; - vha->vha_tgt.target_lport_ptr = target_lport_ptr; - rc = (*callback)(vha); - if (rc != 0) { - ha->tgt.tgt_ops = NULL; - vha->vha_tgt.target_lport_ptr = NULL; - scsi_host_put(host); - } mutex_unlock(&qla_tgt_mutex); + + rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); + if (rc != 0) + scsi_host_put(host); + return rc; } mutex_unlock(&qla_tgt_mutex); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index b33e411..1d10eec 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -932,8 +932,8 @@ void qlt_disable_vha(struct scsi_qla_host *); */ extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); -extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64, - int (*callback)(struct scsi_qla_host *), void *); +extern int qlt_lport_register(void *, u64, u64, u64, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 113ca95..75a141b 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1559,14 +1559,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) return 0; } -static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) +static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) { - struct tcm_qla2xxx_lport *lport; + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; /* - * Setup local pointer to vha, NPIV VP pointer (if present) and - * vha->tcm_lport pointer + * Setup 
tgt_ops, local pointer to vha and target_lport_ptr */ - lport = (struct tcm_qla2xxx_lport *)vha->vha_tgt.target_lport_ptr; + ha->tgt.tgt_ops = &tcm_qla2xxx_template; + vha->vha_tgt.target_lport_ptr = target_lport_ptr; lport->qla_vha = vha; return 0; @@ -1598,8 +1602,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport( if (ret != 0) goto out; - ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, - tcm_qla2xxx_lport_register_cb, lport); + ret = qlt_lport_register(lport, wwpn, 0, 0, + tcm_qla2xxx_lport_register_cb); if (ret != 0) goto out_lport; @@ -1637,20 +1641,70 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) kfree(lport); } +static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) +{ + struct fc_vport *vport; + struct Scsi_Host *sh = base_vha->host; + struct scsi_qla_host *npiv_vha; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; + struct fc_vport_identifiers vport_id; + + if (!qla_tgt_mode_enabled(base_vha)) { + pr_err("qla2xxx base_vha not enabled for target mode\n"); + return -EPERM; + } + + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = npiv_wwpn; + vport_id.node_name = npiv_wwnn; + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + + vport = fc_vport_create(sh, 0, &vport_id); + if (!vport) { + pr_err("fc_vport_create failed for qla2xxx_npiv\n"); + return -ENODEV; + } + /* + * Setup local pointer to NPIV vhba + target_lport_ptr + */ + npiv_vha = (struct scsi_qla_host *)vport->dd_data; + npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; + lport->qla_vha = npiv_vha; + + scsi_host_get(npiv_vha->host); + return 0; +} + + static struct se_wwn *tcm_qla2xxx_npiv_make_lport( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct tcm_qla2xxx_lport *lport; - u64 npiv_wwpn, npiv_wwnn; + u64 phys_wwpn, npiv_wwpn, npiv_wwnn; + char *p, tmp[128]; int ret; - struct scsi_qla_host *vha = NULL; - struct qla_hw_data *ha = NULL; - scsi_qla_host_t *base_vha = NULL; - if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, - &npiv_wwpn, &npiv_wwnn) < 0) + snprintf(tmp, 128, "%s", name); + + p = strchr(tmp, '@'); + if (!p) { + pr_err("Unable to locate NPIV '@' seperator\n"); + return ERR_PTR(-EINVAL); + } + *p++ = '\0'; + + if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, + &npiv_wwpn, &npiv_wwnn) < 0) return ERR_PTR(-EINVAL); lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); @@ -1668,21 +1722,11 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( if (ret != 0) goto out; - ret = qlt_lport_register(&tcm_qla2xxx_template, npiv_wwpn, - tcm_qla2xxx_lport_register_cb, lport); - + ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, + tcm_qla2xxx_lport_register_npiv_cb); if (ret != 0) goto out_lport; - vha = lport->qla_vha; - ha = vha->hw; - base_vha = pci_get_drvdata(ha->pdev); - - if (!qla_tgt_mode_enabled(base_vha)) { - ret = -EPERM; - goto out_lport; - } - return &lport->lport_wwn; out_lport: vfree(lport->lport_loopid_map); @@ -1696,14 +1740,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct Scsi_Host *sh = vha->host; + struct scsi_qla_host *npiv_vha = lport->qla_vha; + struct qla_hw_data *ha = 
npiv_vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + scsi_host_put(npiv_vha->host); /* * Notify libfc that we want to release the vha->fc_vport */ - fc_vport_terminate(npiv_vha->fc_vport); - - scsi_host_put(sh); + fc_vport_terminate(npiv_vha->fc_vport); + scsi_host_put(base_vha->host); kfree(lport); } -- cgit v0.10.2 From 4a4caa29f1abcb14377e05d57c0793d338fb945d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 10 Jan 2014 02:06:59 +0000 Subject: iscsi-target: Pre-allocate more tags to avoid ack starvation This patch addresses a long-standing iscsi-target fabric ack starvation issue where iscsit_allocate_cmd() -> percpu_ida_alloc_state() ends up hitting slow path percpu-ida code, because iscsit_ack_from_expstatsn() is expected to free ack'ed tags after tag allocation. This is done to take into account the tags waiting to be acknowledged and released in iscsit_ack_from_expstatsn(), but whose number is not directly limited by the CmdSN Window queue_depth being enforced by the target. That said, this patch bumps up the pre-allocated number of per session tags to: (max(queue_depth, ISCSIT_MIN_TAGS) * 2) + ISCSIT_EXTRA_TAGS for good measure to avoid the percpu_ida_alloc_state() slow path. Cc: #3.12+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 83c965c..582ba84 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -1192,7 +1192,7 @@ get_target: */ alloc_tags: tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); - tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS; + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); -- cgit v0.10.2 From 6f6b5d1ec56acdeab0503d2b823f6f88a0af493e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 19 Jan 2014 08:26:37 +0000 Subject: percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask This patch changes percpu_ida_alloc() + callers to accept a task state bitmask for prepare_to_wait() for code like target/iscsi that needs it for interruptible sleep, which is provided in a subsequent patch. It now expects TASK_UNINTERRUPTIBLE when the caller is able to sleep waiting for a new tag, or TASK_RUNNING when the caller cannot sleep, and is forced to return a negative value when no tags are available. v2 changes: - Include blk-mq + tcm_fc + vhost/scsi + target/iscsi changes - Drop signal_pending_state() call v3 changes: - Only call prepare_to_wait() + finish_wait() when != TASK_RUNNING (PeterZ) Reported-by: Linus Torvalds Cc: Linus Torvalds Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Jens Axboe Signed-off-by: Kent Overstreet Cc: #3.12+ Signed-off-by: Nicholas Bellinger diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d64a02f..5d70edc 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) { int tag; - tag = percpu_ida_alloc(&tags->free_tags, gfp); + tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? + TASK_UNINTERRUPTIBLE : TASK_RUNNING); if (tag < 0) return BLK_MQ_TAG_FAIL; return tag + tags->nr_reserved_tags; @@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, return BLK_MQ_TAG_FAIL; } - tag = percpu_ida_alloc(&tags->reserved_tags, gfp); + tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 
+ TASK_UNINTERRUPTIBLE : TASK_RUNNING); if (tag < 0) return BLK_MQ_TAG_FAIL; return tag; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 0819e68..9b8e1db 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -156,9 +156,13 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) { struct iscsi_cmd *cmd; struct se_session *se_sess = conn->sess->se_sess; - int size, tag; + int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_UNINTERRUPTIBLE : + TASK_RUNNING; + + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); + if (tag < 0) + return NULL; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); memset(cmd, 0, size); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 479ec56..8b2c1aa 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) struct se_session *se_sess = sess->se_sess; int tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) goto busy; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 84488a8..2d084fb 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, } se_sess = tv_nexus->tvn_se_sess; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) { pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); return ERR_PTR(-ENOMEM); diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h index 1900bd0..f5cfdd6 100644 --- a/include/linux/percpu_ida.h +++ b/include/linux/percpu_ida.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -61,7 +62,7 @@ struct percpu_ida { /* Max size of percpu freelist, */ #define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) -int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); +int percpu_ida_alloc(struct percpu_ida *pool, int state); void percpu_ida_free(struct percpu_ida *pool, unsigned tag); void percpu_ida_destroy(struct percpu_ida *pool); diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 9d054bf..58b6714 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) /** * percpu_ida_alloc - allocate a tag * @pool: pool to allocate from - * @gfp: gfp flags + * @state: task state for prepare_to_wait * * Returns a tag - an integer in the range [0..nr_tags) (passed to * tag_pool_init()), or otherwise -ENOSPC on allocation failure. * * Safe to be called from interrupt context (assuming it isn't passed - * __GFP_WAIT, of course). + * TASK_UNINTERRUPTIBLE, of course). * * @gfp indicates whether or not to wait until a free id is available (it's not * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep * however long it takes until another thread frees an id (same semantics as a * mempool). * - * Will not fail if passed __GFP_WAIT. + * Will not fail if passed TASK_UNINTERRUPTIBLE. 
*/ -int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) +int percpu_ida_alloc(struct percpu_ida *pool, int state) { DEFINE_WAIT(wait); struct percpu_ida_cpu *tags; @@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) * * global lock held and irqs disabled, don't need percpu lock */ - prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); + if (state != TASK_RUNNING) + prepare_to_wait(&pool->wait, &wait, state); if (!tags->nr_free) alloc_global_tags(pool, tags); @@ -191,7 +192,7 @@ spin_unlock(&pool->lock); local_irq_restore(flags); - if (tag >= 0 || !(gfp & __GFP_WAIT)) + if (tag >= 0 || state == TASK_RUNNING) break; schedule(); @@ -199,8 +200,9 @@ local_irq_save(flags); tags = this_cpu_ptr(pool->tag_cpu); } + if (state != TASK_RUNNING) + finish_wait(&pool->wait, &wait); - finish_wait(&pool->wait, &wait); return tag; } EXPORT_SYMBOL_GPL(percpu_ida_alloc); -- cgit v0.10.2 From 555b270e25b0279b98083518a85f4b1da144a181 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 20 Jan 2014 03:36:24 +0000 Subject: iscsi-target: Fix connection reset hang with percpu_ida_alloc This patch addresses a bug where connection reset would hang indefinitely once percpu_ida_alloc() was starved for tags, due to the fact that it always assumed uninterruptible sleep mode. So now make percpu_ida_alloc() check signal_pending_state() so that interruptible sleep is optional, and convert iscsit_allocate_cmd() to set TASK_INTERRUPTIBLE for GFP_KERNEL, or TASK_RUNNING for GFP_ATOMIC. Reported-by: Linus Torvalds Cc: Kent Overstreet Cc: #3.12+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 9b8e1db..5477eca 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -156,7 +156,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) { struct iscsi_cmd *cmd; struct se_session *se_sess = conn->sess->se_sess; - int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_UNINTERRUPTIBLE : + int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE : TASK_RUNNING; tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 58b6714..7be235f 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -138,14 +138,14 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) * tag_pool_init()), or otherwise -ENOSPC on allocation failure. * * Safe to be called from interrupt context (assuming it isn't passed - * TASK_UNINTERRUPTIBLE, of course). + * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). * * @gfp indicates whether or not to wait until a free id is available (it's not * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep * however long it takes until another thread frees an id (same semantics as a * mempool). * - * Will not fail if passed TASK_UNINTERRUPTIBLE. + * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE. 
*/ int percpu_ida_alloc(struct percpu_ida *pool, int state) { @@ -195,6 +195,11 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) if (tag >= 0 || state == TASK_RUNNING) break; + if (signal_pending_state(state, current)) { + tag = -ERESTARTSYS; + break; + } + schedule(); local_irq_save(flags); -- cgit v0.10.2 From 676687c69697d2081d25afd14ee90937d1fb0c8e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 20 Jan 2014 03:36:44 +0000 Subject: iscsi-target: Convert gfp_t parameter to task state bitmask This patch propagates the use of the task state bitmask now used by percpu_ida_alloc() up the iscsi-target callchain, replacing the use of GFP_ATOMIC with TASK_RUNNING, and GFP_KERNEL with TASK_INTERRUPTIBLE. Also, drop the unnecessary gfp_t parameter to isert_allocate_cmd(), and just pass TASK_INTERRUPTIBLE into iscsit_allocate_cmd(). Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 421182b..63bcf69 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1028,13 +1028,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, } static struct iscsi_cmd -*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp) +*isert_allocate_cmd(struct iscsi_conn *conn) { struct isert_conn *isert_conn = (struct isert_conn *)conn->context; struct isert_cmd *isert_cmd; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, gfp); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) { pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); return NULL; @@ -1223,7 +1223,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, switch (opcode) { case ISCSI_OP_SCSI_CMD: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1237,7 +1237,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_NOOP_OUT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1250,7 +1250,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1258,7 +1258,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_LOGOUT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1269,7 +1269,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, HZ); break; case ISCSI_OP_TEXT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d70e911..2a52752 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -621,7 +621,7 @@ static int iscsit_add_reject( { struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) return -1; @@ -2476,7 +2476,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) if (!conn_p) return; - cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); if (!cmd) { iscsit_dec_conn_usage_count(conn_p); return; @@ -3952,7 +3952,7 @@ static int 
iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) switch (hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_SCSI_CMD: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; @@ -3964,28 +3964,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) case ISCSI_OP_NOOP_OUT: cmd = NULL; if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; } ret = iscsit_handle_nop_out(conn, cmd, buf); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); break; case ISCSI_OP_TEXT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_text_cmd(conn, cmd, buf); break; case ISCSI_OP_LOGOUT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 5477eca..e655b04 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -152,12 +152,11 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) * May be called from software interrupt (timer) context for allocating * iSCSI NopINs. */ -struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) +struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) { struct iscsi_cmd *cmd; struct se_session *se_sess = conn->sess->se_sess; - int size, tag, state = (gfp_mask & __GFP_WAIT) ? 
TASK_INTERRUPTIBLE : - TASK_RUNNING; + int size, tag; tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); if (tag < 0) @@ -930,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) u8 state; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); if (!cmd) return -1; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index e4fc34a..561a424 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index a12589c..ae5a171 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -94,7 +94,7 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *); /* * From iscsi_target_util.c */ -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *, __be32); extern void iscsit_release_cmd(struct iscsi_cmd *); -- cgit v0.10.2 From 76736db3e291246fbce9db856706af3454b0b078 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 23 Jan 2014 19:29:38 +0200 Subject: target: Report bad sector in sense data for DIF errors SPC-4 states that data-integrity errors shall also report the failed sector in CHECK_CONDITION response sense data information field. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 75364c7..fa3cae3 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1131,6 +1131,7 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, if (rc) { kunmap_atomic(paddr); kunmap_atomic(daddr); + cmd->bad_sector = sector; return rc; } @@ -1191,6 +1192,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, if (rc) { kunmap_atomic(paddr); kunmap_atomic(daddr); + cmd->bad_sector = sector; return rc; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index aebe0bb..51a9736 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2493,6 +2493,19 @@ static int transport_get_sense_codes( return 0; } +static +void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) +{ + /* Place failed LBA in sense data information descriptor 0. 
*/ + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; + buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ + buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; + buffer[SPC_VALIDITY_OFFSET] = 0x80; + + /* Descriptor Information: failing sector */ + put_unaligned_be64(bad_sector, &buffer[12]); +} + int transport_send_check_condition_and_sense(struct se_cmd *cmd, sense_reason_t reason, int from_transport) @@ -2695,6 +2708,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, /* LOGICAL BLOCK GUARD CHECK FAILED */ buffer[SPC_ASC_KEY_OFFSET] = 0x10; buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; + transport_err_sector_info(buffer, cmd->bad_sector); break; case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: /* CURRENT ERROR */ @@ -2705,6 +2719,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ buffer[SPC_ASC_KEY_OFFSET] = 0x10; buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; + transport_err_sector_info(buffer, cmd->bad_sector); break; case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: /* CURRENT ERROR */ @@ -2715,6 +2730,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ buffer[SPC_ASC_KEY_OFFSET] = 0x10; buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; + transport_err_sector_info(buffer, cmd->bad_sector); break; case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: default: diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0336d70..d284186 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -37,6 +37,9 @@ /* Used by transport_send_check_condition_and_sense() */ #define SPC_SENSE_KEY_OFFSET 2 #define SPC_ADD_SENSE_LEN_OFFSET 7 +#define SPC_DESC_TYPE_OFFSET 8 +#define SPC_ADDITIONAL_DESC_LEN_OFFSET 9 +#define SPC_VALIDITY_OFFSET 10 #define SPC_ASC_KEY_OFFSET 12 #define SPC_ASCQ_KEY_OFFSET 13 #define TRANSPORT_IQN_LEN 224 @@ -560,7 +563,7 @@ struct se_cmd { unsigned int t_prot_nents; enum target_prot_ho prot_handover; sense_reason_t pi_err; - u32 block_num; + sector_t bad_sector; }; struct se_ua { -- cgit v0.10.2 From ee291e63293146db64668e8d65eb35c97e8324f4 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Fri, 24 Jan 2014 16:18:54 -0800 Subject: target/iscsi: Fix network portal creation race When creating network portals rapidly, such as when restoring a configuration, LIO's code to reuse existing portals can return a false negative if the thread hasn't run yet and set np_thread_state to ISCSI_NP_THREAD_ACTIVE. This causes an error in the network stack when attempting to bind to the same address/port. This patch sets NP_THREAD_ACTIVE before the np is placed on g_np_list, so even if the thread hasn't run yet, iscsit_get_np will return the existing np. Also, convert np_lock -> np_mutex + hold across adding new net portal to g_np_list to prevent a race where two threads may attempt to create the same network portal, resulting in one of them failing. 
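(Editor's note: the essence of this fix is that the lookup, the ISCSI_NP_THREAD_ACTIVE assignment, and the g_np_list insertion all happen under one sleeping lock, so a second creator always observes a fully published portal. Below is a stand-alone pthreads sketch of the same pattern, illustrative only: portal_get_or_add() and the address string are invented names, not driver code.)

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct portal {
	char addr[64];
	int active;			/* stands in for np_thread_state */
	struct portal *next;
};

static pthread_mutex_t np_lock = PTHREAD_MUTEX_INITIALIZER;
static struct portal *g_np_list;

/* Look up an existing portal or publish a new one, atomically. */
static struct portal *portal_get_or_add(const char *addr)
{
	struct portal *np;

	pthread_mutex_lock(&np_lock);
	for (np = g_np_list; np; np = np->next)
		if (np->active && !strcmp(np->addr, addr))
			goto out;	/* reuse, never double-bind */

	np = calloc(1, sizeof(*np));
	if (!np)
		goto out;
	snprintf(np->addr, sizeof(np->addr), "%s", addr);
	np->active = 1;			/* mark ACTIVE before publishing */
	np->next = g_np_list;
	g_np_list = np;			/* now visible to other creators */
out:
	pthread_mutex_unlock(&np_lock);
	return np;
}

int main(void)
{
	/* Two rapid creations of the same portal yield one object. */
	struct portal *a = portal_get_or_add("192.168.0.1:3260");
	struct portal *b = portal_get_or_add("192.168.0.1:3260");

	printf("same portal reused: %s\n", a == b ? "yes" : "no");
	return 0;
}

(A spinlock cannot serve here because the create path sleeps in socket setup and kthread_run(), which is why the patch converts np_lock to a mutex.)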
(nab: Add missing mutex_unlocks in iscsit_add_np failure paths) (DanC: Fix incorrect spin_unlock -> spin_unlock_bh) Signed-off-by: Andy Grover Cc: #3.1+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 2a52752..a99637e 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -52,7 +52,7 @@ static LIST_HEAD(g_tiqn_list); static LIST_HEAD(g_np_list); static DEFINE_SPINLOCK(tiqn_lock); -static DEFINE_SPINLOCK(np_lock); +static DEFINE_MUTEX(np_lock); static struct idr tiqn_idr; struct idr sess_idr; @@ -307,6 +307,9 @@ bool iscsit_check_np_match( return false; } +/* + * Called with mutex np_lock held + */ static struct iscsi_np *iscsit_get_np( struct __kernel_sockaddr_storage *sockaddr, int network_transport) @@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np( struct iscsi_np *np; bool match; - spin_lock_bh(&np_lock); list_for_each_entry(np, &g_np_list, np_list) { - spin_lock(&np->np_thread_lock); + spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); continue; } @@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np( * while iscsi_tpg_add_network_portal() is called. */ np->np_exports++; - spin_unlock(&np->np_thread_lock); - spin_unlock_bh(&np_lock); + spin_unlock_bh(&np->np_thread_lock); return np; } - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); } - spin_unlock_bh(&np_lock); return NULL; } @@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np( struct sockaddr_in6 *sock_in6; struct iscsi_np *np; int ret; + + mutex_lock(&np_lock); + /* * Locate the existing struct iscsi_np if already active.. */ np = iscsit_get_np(sockaddr, network_transport); - if (np) + if (np) { + mutex_unlock(&np_lock); return np; + } np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); if (!np) { pr_err("Unable to allocate memory for struct iscsi_np\n"); + mutex_unlock(&np_lock); return ERR_PTR(-ENOMEM); } @@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np( ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } @@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np( pr_err("Unable to create kthread: iscsi_np\n"); ret = PTR_ERR(np->np_thread); kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } /* @@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np( * point because iscsi_np has not been added to g_np_list yet. 
*/ np->np_exports = 1; + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; - spin_lock_bh(&np_lock); list_add_tail(&np->np_list, &g_np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); @@ -469,9 +477,9 @@ int iscsit_del_np(struct iscsi_np *np) np->np_transport->iscsit_free_np(np); - spin_lock_bh(&np_lock); + mutex_lock(&np_lock); list_del(&np->np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); -- cgit v0.10.2 From 5259a06ef97068b710f45d092a587e8d740f750f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 28 Jan 2014 17:56:30 -0800 Subject: target: Fix percpu_ref_put race in transport_lun_remove_cmd This patch fixes a percpu_ref_put race for se_lun->lun_ref in transport_lun_remove_cmd() where ->lun_ref could end up being put more than once per command via different target completion and fabric release contexts. It adds a cmpxchg() for se_cmd->lun_ref_active to ensure that percpu_ref_put() is only ever called once per se_cmd. This bug was manifesting itself as a LUN shutdown regression in >= v3.13 code, where percpu_ref_kill() would end up hanging indefinitely due to the incorrect percpu_ref count. (Change se_cmd->lun_ref_active from bool -> int to force at least a 4-byte cmpxchg with MIPS ll/sc ins. - Fengguang) Reported-by: Tommy Apel Cc: Tommy Apel Cc: #3.13+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 51a9736..c50fd9f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -594,10 +594,11 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - if (!lun || !cmd->lun_ref_active) + if (!lun) return; - percpu_ref_put(&lun->lun_ref); + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index d284186..909dacb 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -552,7 +552,7 @@ struct se_cmd { void *priv; /* Used for lun->lun_ref counting */ - bool lun_ref_active; + int lun_ref_active; /* DIF related members */ enum target_prot_op prot_op; -- cgit v0.10.2
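(Editor's note: the cmpxchg() on lun_ref_active above is the classic put-exactly-once idiom. Below is a stand-alone C11 sketch of the same idea; the names echo the kernel structures but none of this is kernel code, and the non-atomic lun_refcount is a single-threaded stand-in for percpu_ref.)

#include <stdatomic.h>
#include <stdio.h>

struct cmd {
	atomic_int lun_ref_active;	/* 1 while this cmd holds lun_ref */
};

static int lun_refcount = 1;		/* stands in for lun->lun_ref */

static void lun_ref_put(void)
{
	lun_refcount--;			/* percpu_ref_put() analogue */
	printf("lun_ref dropped, count now %d\n", lun_refcount);
}

static void cmd_lun_remove(struct cmd *cmd)
{
	/* Like cmpxchg(&cmd->lun_ref_active, true, false): only the one
	 * path that flips 1 -> 0 gets to drop the reference. */
	if (atomic_exchange(&cmd->lun_ref_active, 0))
		lun_ref_put();
}

int main(void)
{
	struct cmd c;

	atomic_init(&c.lun_ref_active, 1);
	cmd_lun_remove(&c);	/* e.g. target completion path: puts */
	cmd_lun_remove(&c);	/* e.g. fabric release path: safe no-op */
	return 0;
}

(Whichever context loses the atomic race simply skips the put, which is exactly what keeps percpu_ref_kill() from waiting on a count that was decremented twice.)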