Date: Wed, 16 Jul 2014 15:57:18 +0000 (UTC)
From: Alexander Motin <mav@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r268767 - in head: sys/cam/ctl sys/cam/scsi sys/conf sys/modules/ctl usr.sbin/ctladm
Message-ID: <201407161557.s6GFvI8L057466@svn.freebsd.org>
Author: mav
Date: Wed Jul 16 15:57:17 2014
New Revision: 268767
URL: http://svnweb.freebsd.org/changeset/base/268767

Log:
  Add support for the VMware dialect of the EXTENDED COPY command, a.k.a.
  VAAI Clone.  This allows VMs to be cloned and moved between LUNs within
  one storage host without generating extra network traffic to the
  initiator and back, and without being limited by network bandwidth.

  LUNs participating in a copy operation should have unique NAA or EUI IDs
  set.  For LUNs without these IDs, VMware falls back to traditional copy
  operations.

  Beware: LUN IDs explicitly set to values that are not unique from the VM
  cluster's point of view may cause data corruption if the wrong LUN is
  addressed!

  MFC after:	2 weeks
  Sponsored by:	iXsystems, Inc.

Added:
  head/sys/cam/ctl/ctl_tpc.c   (contents, props changed)
  head/sys/cam/ctl/ctl_tpc.h   (contents, props changed)
  head/sys/cam/ctl/ctl_tpc_local.c   (contents, props changed)
Modified:
  head/sys/cam/ctl/ctl.c
  head/sys/cam/ctl/ctl_cmd_table.c
  head/sys/cam/ctl/ctl_frontend.c
  head/sys/cam/ctl/ctl_frontend.h
  head/sys/cam/ctl/ctl_frontend_iscsi.c
  head/sys/cam/ctl/ctl_private.h
  head/sys/cam/ctl/ctl_ser_table.c
  head/sys/cam/ctl/scsi_ctl.c
  head/sys/cam/scsi/scsi_all.h
  head/sys/conf/files
  head/sys/modules/ctl/Makefile
  head/usr.sbin/ctladm/ctladm.8

Modified: head/sys/cam/ctl/ctl.c
==============================================================================
--- head/sys/cam/ctl/ctl.c	Wed Jul 16 14:08:01 2014	(r268766)
+++ head/sys/cam/ctl/ctl.c	Wed Jul 16 15:57:17 2014	(r268767)
@@ -320,10 +320,10 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verb
 
 /*
  * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
- * SCSI Ports (0x88), Block limits (0xB0) and
+ * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0) and
  * Logical Block Provisioning (0xB2)
  */
-#define SCSI_EVPD_NUM_SUPPORTED_PAGES	6
+#define SCSI_EVPD_NUM_SUPPORTED_PAGES	7
 
 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
 				  int param);
@@ -349,8 +349,6 @@ static int ctl_ioctl_fill_ooa(struct ctl
 				  struct ctl_ooa_entry *kern_entries);
 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 		     struct thread *td);
-uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
-uint32_t ctl_port_idx(int port_num);
 static uint32_t ctl_map_lun(int port_num, uint32_t lun);
 static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
 #ifdef unused
@@ -4598,6 +4596,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_soft
 	TAILQ_INIT(&lun->ooa_queue);
 	TAILQ_INIT(&lun->blocked_queue);
 	STAILQ_INIT(&lun->error_list);
+	ctl_tpc_init(lun);
 
 	/*
 	 * Initialize the mode page index.
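
As an illustration outside this change: an initiator that wants to use the new
functionality would normally confirm that the target advertises the Third-party
Copy VPD page (0x8F) before issuing EXTENDED COPY.  A minimal sketch of such a
check, assuming the page 0x00 layout used by FreeBSD's
struct scsi_vpd_supported_page_list (byte 3 holds the number of page codes
that follow):

    #include <stdint.h>
    #include <stddef.h>

    /*
     * Illustrative sketch only: scan an INQUIRY EVPD page 0x00 (Supported
     * VPD Pages) response for the Third-party Copy page (0x8F).
     */
    static int
    has_tpc_vpd_page(const uint8_t *buf, size_t len)
    {
            size_t i, count;

            if (len < 4 || buf[1] != 0x00)  /* not the Supported Pages list */
                    return (0);
            count = buf[3];                 /* number of page codes that follow */
            if (count > len - 4)
                    count = len - 4;
            for (i = 0; i < count; i++) {
                    if (buf[4 + i] == 0x8f) /* SVPD_SCSI_TPC */
                            return (1);
            }
            return (0);
    }
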
@@ -4749,6 +4748,7 @@ ctl_free_lun(struct ctl_lun *lun) atomic_subtract_int(&lun->be_lun->be->num_luns, 1); lun->be_lun->lun_shutdown(lun->be_lun->be_lun); + ctl_tpc_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); if (lun->flags & CTL_LUN_MALLOCED) @@ -9821,10 +9821,12 @@ ctl_inquiry_evpd_supported(struct ctl_sc pages->page_list[2] = SVPD_DEVICE_ID; /* SCSI Ports */ pages->page_list[3] = SVPD_SCSI_PORTS; + /* Third-party Copy */ + pages->page_list[4] = SVPD_SCSI_TPC; /* Block limits */ - pages->page_list[4] = SVPD_BLOCK_LIMITS; + pages->page_list[5] = SVPD_BLOCK_LIMITS; /* Logical Block Provisioning */ - pages->page_list[5] = SVPD_LBP; + pages->page_list[6] = SVPD_LBP; ctsio->scsi_status = SCSI_STATUS_OK; @@ -10023,7 +10025,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_s struct scsi_vpd_port_designation_cont *pdc; struct ctl_lun *lun; struct ctl_port *port; - int data_len, num_target_ports, id_len, g, pg, p; + int data_len, num_target_ports, iid_len, id_len, g, pg, p; int num_target_port_groups, single; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; @@ -10034,6 +10036,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_s else num_target_port_groups = NUM_TARGET_PORT_GROUPS; num_target_ports = 0; + iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { @@ -10043,6 +10046,8 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_s CTL_MAX_LUNS) continue; num_target_ports++; + if (port->init_devid) + iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } @@ -10050,7 +10055,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_s data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * num_target_ports * (sizeof(struct scsi_vpd_port_designation) + - sizeof(struct scsi_vpd_port_designation_cont)) + id_len; + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; @@ -10098,19 +10103,22 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_s continue; p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; scsi_ulto2b(p, pd->relative_port_id); - scsi_ulto2b(0, pd->initiator_transportid_length); + if (port->init_devid && g == pg) { + iid_len = port->init_devid->len; + memcpy(pd->initiator_transportid, + port->init_devid->data, port->init_devid->len); + } else + iid_len = 0; + scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) - &pd->initiator_transportid[0]; + (&pd->initiator_transportid[iid_len]); if (port->port_devid && g == pg) { id_len = port->port_devid->len; - scsi_ulto2b(port->port_devid->len, - pdc->target_port_descriptors_length); memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); - } else { + } else id_len = 0; - scsi_ulto2b(0, pdc->target_port_descriptors_length); - } + scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } @@ -10259,6 +10267,9 @@ ctl_inquiry_evpd(struct ctl_scsiio *ctsi case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; + case SVPD_SCSI_TPC: + retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); + break; case SVPD_BLOCK_LIMITS: retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; @@ -10289,7 +10300,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio struct ctl_lun *lun; char *val; uint32_t 
alloc_len; - int is_fc; + ctl_port_type port_type; ctl_softc = control_softc; @@ -10298,11 +10309,10 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio * We treat the ioctl front end, and any SCSI adapters, as packetized * SCSI front ends. */ - if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != - CTL_PORT_FC) - is_fc = 0; - else - is_fc = 1; + port_type = ctl_softc->ctl_ports[ + ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; + if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) + port_type = CTL_PORT_SCSI; lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_inquiry *)ctsio->cdb; @@ -10381,7 +10391,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ - inq_ptr->version = SCSI_REV_SPC3; + inq_ptr->version = SCSI_REV_SPC4; /* * According to SAM-3, even if a device only supports a single @@ -10406,17 +10416,18 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); - inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; + inq_ptr->spc3_flags = SPC3_SID_3PC; + if (!ctl_is_single) + inq_ptr->spc3_flags |= SPC3_SID_TPGS_IMPLICIT; /* 16 bit addressing */ - if (is_fc == 0) + if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; /* XXX set the SID_MultiP bit here if we're actually going to respond on multiple ports */ inq_ptr->spc2_flags |= SPC2_SID_MultiP; /* 16 bit data bus, synchronous transfers */ - /* XXX these flags don't apply for FC */ - if (is_fc == 0) + if (port_type == CTL_PORT_SCSI) inq_ptr->flags = SID_WBus16 | SID_Sync; /* * XXX KDM do we want to support tagged queueing on the control @@ -10477,33 +10488,36 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio * and Selection) and Information Unit transfers on both the * control and array devices. */ - if (is_fc == 0) + if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; - /* SAM-3 */ - scsi_ulto2b(0x0060, inq_ptr->version1); - /* SPC-3 (no version claimed) XXX should we claim a version? */ - scsi_ulto2b(0x0300, inq_ptr->version2); - if (is_fc) { + /* SAM-5 (no version claimed) */ + scsi_ulto2b(0x00A0, inq_ptr->version1); + /* SPC-4 (no version claimed) */ + scsi_ulto2b(0x0460, inq_ptr->version2); + if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); - } else { + } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); + } else if (port_type == CTL_PORT_ISCSI) { + /* iSCSI (no version claimed) */ + scsi_ulto2b(0x0960, inq_ptr->version3); + } else if (port_type == CTL_PORT_SAS) { + /* SAS (no version claimed) */ + scsi_ulto2b(0x0BE0, inq_ptr->version3); } if (lun == NULL) { - /* SBC-2 (no version claimed) XXX should we claim a version? */ - scsi_ulto2b(0x0320, inq_ptr->version4); + /* SBC-3 (no version claimed) */ + scsi_ulto2b(0x04C0, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: - /* - * SBC-2 (no version claimed) XXX should we claim a - * version? 
- */ - scsi_ulto2b(0x0320, inq_ptr->version4); + /* SBC-3 (no version claimed) */ + scsi_ulto2b(0x04C0, inq_ptr->version4); break; case T_PROCESSOR: default: Modified: head/sys/cam/ctl/ctl_cmd_table.c ============================================================================== --- head/sys/cam/ctl/ctl_cmd_table.c Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_cmd_table.c Wed Jul 16 15:57:17 2014 (r268767) @@ -190,6 +190,156 @@ const struct ctl_cmd_entry ctl_cmd_table /* 08-1f */ }; +/* 83 EXTENDED COPY */ +const struct ctl_cmd_entry ctl_cmd_table_83[32] = +{ +/* 00 EXTENDED COPY (LID1) */ +{ctl_extended_copy_lid1, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_OUT, + CTL_LUN_PAT_NONE, + 16, { 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, + +/* 01 EXTENDED COPY (LID4) */ +{ctl_extended_copy_lid4, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_OUT, + CTL_LUN_PAT_NONE, + 16, { 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, + +/* 02 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 03 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 04 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 05 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 06 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 07 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 08 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 09 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0A */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0B */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0C */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0D */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0E */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 0F */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 10 POPULATE TOKEN */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 11 WRITE USING TOKEN */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 12 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 13 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 14 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 15 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 16 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 17 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 18 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 19 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 1A */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 1B */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 1C COPY OPERATION ABORT */ +{ctl_copy_operation_abort, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_NONE, + CTL_LUN_PAT_NONE, + 16, { 0x1c, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07}}, +}; + +/* 84 RECEIVE COPY STATUS */ +const struct ctl_cmd_entry ctl_cmd_table_84[32] = +{ +/* 00 RECEIVE COPY STATUS (LID1) */ +{ctl_receive_copy_status_lid1, CTL_SERIDX_RD_CAP, + CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_IN, + CTL_LUN_PAT_NONE, + 16, {0x00, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 
0, 0x07}}, + +/* 01 RECEIVE COPY DATA (LID1) */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 02 */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 03 RECEIVE COPY OPERATING PARAMETERS */ +{ctl_receive_copy_operating_parameters, CTL_SERIDX_RD_CAP, + CTL_CMD_FLAG_OK_ON_BOTH | + CTL_CMD_FLAG_OK_ON_STOPPED | + CTL_CMD_FLAG_OK_ON_INOPERABLE | + CTL_CMD_FLAG_OK_ON_SECONDARY | + CTL_FLAG_DATA_IN, + CTL_LUN_PAT_NONE, + 16, {0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, + +/* 04 RECEIVE COPY FAILURE DETAILS (LID1) */ +{ctl_receive_copy_failure_details, CTL_SERIDX_RD_CAP, + CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_IN, + CTL_LUN_PAT_NONE, + 16, {0x04, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, + +/* 05 RECEIVE COPY STATUS (LID4) */ +{ctl_receive_copy_status_lid4, CTL_SERIDX_RD_CAP, + CTL_CMD_FLAG_OK_ON_BOTH | + CTL_FLAG_DATA_IN, + CTL_LUN_PAT_NONE, + 16, {0x05, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, + +/* 06 RECEIVE COPY DATA (LID4)*/ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 07 RECEIVE ROD TOKEN INFORMATION */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, + +/* 08 REPORT ALL ROD TOKENS */ +{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, +}; + /* 9E SERVICE ACTION IN(16) */ const struct ctl_cmd_entry ctl_cmd_table_9e[32] = { @@ -844,10 +994,12 @@ const struct ctl_cmd_entry ctl_cmd_table {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 83 EXTENDED COPY */ -{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, +{__DECONST(ctl_opfunc *, ctl_cmd_table_83), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, + CTL_LUN_PAT_NONE}, /* 84 RECEIVE COPY RESULTS */ -{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, +{__DECONST(ctl_opfunc *, ctl_cmd_table_84), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, + CTL_LUN_PAT_NONE}, /* 85 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, Modified: head/sys/cam/ctl/ctl_frontend.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend.c Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_frontend.c Wed Jul 16 15:57:17 2014 (r268767) @@ -234,6 +234,8 @@ ctl_port_deregister(struct ctl_port *por port->port_devid = NULL; free(port->target_devid, M_CTL); port->target_devid = NULL; + free(port->init_devid, M_CTL); + port->init_devid = NULL; for (i = 0; i < port->max_initiators; i++) free(port->wwpn_iid[i].name, M_CTL); free(port->wwpn_iid, M_CTL); Modified: head/sys/cam/ctl/ctl_frontend.h ============================================================================== --- head/sys/cam/ctl/ctl_frontend.h Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_frontend.h Wed Jul 16 15:57:17 2014 (r268767) @@ -242,6 +242,7 @@ struct ctl_port { ctl_options_t options; /* passed to CTL */ struct ctl_devid *port_devid; /* passed to CTL */ struct ctl_devid *target_devid; /* passed to CTL */ + struct ctl_devid *init_devid; /* passed to CTL */ STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */ STAILQ_ENTRY(ctl_port) links; /* used by CTL */ }; Modified: head/sys/cam/ctl/ctl_frontend_iscsi.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend_iscsi.c Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_frontend_iscsi.c Wed Jul 16 15:57:17 2014 (r268767) @@ -2702,7 +2702,7 @@ cfiscsi_scsi_command_done(union ctl_io * * Do not return status for aborted 
commands. * There are exceptions, but none supported by CTL yet. */ - if (io->io_hdr.status == CTL_CMD_ABORTED && + if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { ctl_free_io(io); icl_pdu_free(request); Modified: head/sys/cam/ctl/ctl_private.h ============================================================================== --- head/sys/cam/ctl/ctl_private.h Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_private.h Wed Jul 16 15:57:17 2014 (r268767) @@ -373,6 +373,7 @@ struct ctl_devid { */ #define NUM_TARGET_PORT_GROUPS 2 +struct tpc_list; struct ctl_lun { struct mtx lun_lock; struct ctl_id target; @@ -403,6 +404,7 @@ struct ctl_lun { uint8_t res_type; uint8_t write_buffer[524288]; struct ctl_devid *lun_devid; + TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists; }; typedef enum { @@ -467,6 +469,8 @@ struct ctl_softc { extern const struct ctl_cmd_entry ctl_cmd_table[256]; uint32_t ctl_get_initindex(struct ctl_nexus *nexus); +uint32_t ctl_get_resindex(struct ctl_nexus *nexus); +uint32_t ctl_port_idx(int port_num); int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, uint32_t total_ctl_io, struct ctl_io_pool **npool); void ctl_pool_free(struct ctl_io_pool *pool); @@ -498,6 +502,17 @@ int ctl_report_supported_tmf(struct ctl_ int ctl_report_timestamp(struct ctl_scsiio *ctsio); int ctl_isc(struct ctl_scsiio *ctsio); +void ctl_tpc_init(struct ctl_lun *lun); +void ctl_tpc_shutdown(struct ctl_lun *lun); +int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len); +int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio); +int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio); +int ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio); +int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio); +int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio); +int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio); +int ctl_copy_operation_abort(struct ctl_scsiio *ctsio); + #endif /* _KERNEL */ #endif /* _CTL_PRIVATE_H_ */ Modified: head/sys/cam/ctl/ctl_ser_table.c ============================================================================== --- head/sys/cam/ctl/ctl_ser_table.c Wed Jul 16 14:08:01 2014 (r268766) +++ head/sys/cam/ctl/ctl_ser_table.c Wed Jul 16 15:57:17 2014 (r268767) @@ -69,7 +69,7 @@ ctl_serialize_table[CTL_SERIDX_COUNT][CT /*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*RQ_SNS */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK}, /*INQ */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK}, -/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK}, +/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK}, /*RES */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK}, /*LOG_SNS */{ pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, pS, bK, bK}, /*FORMAT */{ pS, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK}, Added: head/sys/cam/ctl/ctl_tpc.c ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ head/sys/cam/ctl/ctl_tpc.c Wed Jul 16 15:57:17 2014 (r268767) @@ -0,0 +1,1370 @@ +/*- + * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/types.h> +#include <sys/lock.h> +#include <sys/module.h> +#include <sys/mutex.h> +#include <sys/condvar.h> +#include <sys/malloc.h> +#include <sys/conf.h> +#include <sys/queue.h> +#include <sys/sysctl.h> +#include <machine/atomic.h> + +#include <cam/cam.h> +#include <cam/scsi/scsi_all.h> +#include <cam/scsi/scsi_da.h> +#include <cam/ctl/ctl_io.h> +#include <cam/ctl/ctl.h> +#include <cam/ctl/ctl_frontend.h> +#include <cam/ctl/ctl_frontend_internal.h> +#include <cam/ctl/ctl_util.h> +#include <cam/ctl/ctl_backend.h> +#include <cam/ctl/ctl_ioctl.h> +#include <cam/ctl/ctl_ha.h> +#include <cam/ctl/ctl_private.h> +#include <cam/ctl/ctl_debug.h> +#include <cam/ctl/ctl_scsi_all.h> +#include <cam/ctl/ctl_tpc.h> +#include <cam/ctl/ctl_error.h> + +#define TPC_MAX_CSCDS 64 +#define TPC_MAX_SEGS 64 +#define TPC_MAX_SEG 0 +#define TPC_MAX_LIST 8192 +#define TPC_MAX_INLINE 0 +#define TPC_MAX_LISTS 255 +#define TPC_MAX_IO_SIZE (1024 * 1024) + +MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC"); + +typedef enum { + TPC_ERR_RETRY = 0x000, + TPC_ERR_FAIL = 0x001, + TPC_ERR_MASK = 0x0ff, + TPC_ERR_NO_DECREMENT = 0x100 +} tpc_error_action; + +struct tpc_list; +TAILQ_HEAD(runl, tpc_io); +struct tpc_io { + union ctl_io *io; + uint64_t lun; + struct tpc_list *list; + struct runl run; + TAILQ_ENTRY(tpc_io) rlinks; + TAILQ_ENTRY(tpc_io) links; +}; + +struct tpc_list { + uint8_t service_action; + int init_port; + uint16_t init_idx; + uint32_t list_id; + uint8_t flags; + uint8_t *params; + struct scsi_ec_cscd *cscd; + struct scsi_ec_segment *seg[TPC_MAX_SEGS]; + uint8_t *inl; + int ncscd; + int nseg; + int leninl; + int curseg; + off_t curbytes; + int curops; + int stage; + uint8_t *buf; + int segbytes; + int tbdio; + int error; + int abort; + int completed; + TAILQ_HEAD(, tpc_io) allio; + struct scsi_sense_data sense_data; + uint8_t sense_len; + uint8_t scsi_status; + struct ctl_scsiio *ctsio; + struct ctl_lun *lun; + TAILQ_ENTRY(tpc_list) links; +}; + +void +ctl_tpc_init(struct ctl_lun *lun) +{ + + TAILQ_INIT(&lun->tpc_lists); +} + +void +ctl_tpc_shutdown(struct ctl_lun *lun) +{ + struct tpc_list *list; + + while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) { + TAILQ_REMOVE(&lun->tpc_lists, list, links); + KASSERT(list->completed, + 
("Not completed TPC (%p) on shutdown", list)); + free(list, M_CTL); + } +} + +int +ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len) +{ + struct scsi_vpd_tpc *tpc_ptr; + struct scsi_vpd_tpc_descriptor *d_ptr; + struct scsi_vpd_tpc_descriptor_sc *sc_ptr; + struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr; + struct scsi_vpd_tpc_descriptor_pd *pd_ptr; + struct scsi_vpd_tpc_descriptor_sd *sd_ptr; + struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr; + struct scsi_vpd_tpc_descriptor_gco *gco_ptr; + struct ctl_lun *lun; + int data_len; + + lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; + + data_len = sizeof(struct scsi_vpd_tpc) + + roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) + + 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) + + sizeof(struct scsi_vpd_tpc_descriptor_pd) + + roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) + + roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) + + sizeof(struct scsi_vpd_tpc_descriptor_gco); + + ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); + tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr; + ctsio->kern_sg_entries = 0; + + if (data_len < alloc_len) { + ctsio->residual = alloc_len - data_len; + ctsio->kern_data_len = data_len; + ctsio->kern_total_len = data_len; + } else { + ctsio->residual = 0; + ctsio->kern_data_len = alloc_len; + ctsio->kern_total_len = alloc_len; + } + ctsio->kern_data_resid = 0; + ctsio->kern_rel_offset = 0; + ctsio->kern_sg_entries = 0; + + /* + * The control device is always connected. The disk device, on the + * other hand, may not be online all the time. + */ + if (lun != NULL) + tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | + lun->be_lun->lun_type; + else + tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; + tpc_ptr->page_code = SVPD_SCSI_TPC; + scsi_ulto2b(data_len - 4, tpc_ptr->page_length); + + /* Supported commands */ + d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0]; + sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr; + scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type); + sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7; + scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length); + scd_ptr = &sc_ptr->descr[0]; + scd_ptr->opcode = EXTENDED_COPY; + scd_ptr->sa_length = 3; + scd_ptr->supported_service_actions[0] = EC_EC_LID1; + scd_ptr->supported_service_actions[1] = EC_EC_LID4; + scd_ptr->supported_service_actions[2] = EC_COA; + scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *) + &scd_ptr->supported_service_actions[scd_ptr->sa_length]; + scd_ptr->opcode = RECEIVE_COPY_STATUS; + scd_ptr->sa_length = 4; + scd_ptr->supported_service_actions[0] = RCS_RCS_LID1; + scd_ptr->supported_service_actions[1] = RCS_RCFD; + scd_ptr->supported_service_actions[2] = RCS_RCS_LID4; + scd_ptr->supported_service_actions[3] = RCS_RCOP; + + /* Parameter data. 
*/ + d_ptr = (struct scsi_vpd_tpc_descriptor *) + (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); + pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr; + scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type); + scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length); + scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count); + scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count); + scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length); + scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length); + + /* Supported Descriptors */ + d_ptr = (struct scsi_vpd_tpc_descriptor *) + (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); + sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr; + scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type); + scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length); + sd_ptr->list_length = 4; + sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B; + sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY; + sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY; + sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID; + + /* Supported CSCD Descriptor IDs */ + d_ptr = (struct scsi_vpd_tpc_descriptor *) + (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); + sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr; + scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type); + scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length); + scsi_ulto2b(2, sdid_ptr->list_length); + scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]); + + /* General Copy Operations */ + d_ptr = (struct scsi_vpd_tpc_descriptor *) + (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); + gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr; + scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type); + scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length); + scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies); + scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies); + scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length); + gco_ptr->data_segment_granularity = 0; + gco_ptr->inline_data_granularity = 0; + + ctsio->scsi_status = SCSI_STATUS_OK; + ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; + ctsio->be_move_done = ctl_config_move_done; + ctl_datamove((union ctl_io *)ctsio); + + return (CTL_RETVAL_COMPLETE); +} + +int +ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio) +{ + struct ctl_lun *lun; + struct scsi_receive_copy_operating_parameters *cdb; + struct scsi_receive_copy_operating_parameters_data *data; + int retval; + int alloc_len, total_len; + + CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); + + cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb; + lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; + + retval = CTL_RETVAL_COMPLETE; + + total_len = sizeof(*data) + 4; + alloc_len = scsi_4btoul(cdb->length); + + ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); + + ctsio->kern_sg_entries = 0; + + if (total_len < alloc_len) { + ctsio->residual = alloc_len - total_len; + ctsio->kern_data_len = total_len; + ctsio->kern_total_len = total_len; + } else { + ctsio->residual = 0; + ctsio->kern_data_len = alloc_len; + ctsio->kern_total_len = alloc_len; + } + ctsio->kern_data_resid = 0; + ctsio->kern_rel_offset = 0; + + data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr; + scsi_ulto4b(sizeof(*data) - 4 + 4, data->length); + data->snlid = RCOP_SNLID; + scsi_ulto2b(TPC_MAX_CSCDS, 
data->maximum_cscd_descriptor_count); + scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count); + scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length); + scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length); + scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length); + scsi_ulto4b(0, data->held_data_limit); + scsi_ulto4b(0, data->maximum_stream_device_transfer_size); + scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies); + data->maximum_concurrent_copies = TPC_MAX_LISTS; + data->data_segment_granularity = 0; + data->inline_data_granularity = 0; + data->held_data_granularity = 0; + data->implemented_descriptor_list_length = 4; + data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B; + data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY; + data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY; + data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID; + + ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; + ctsio->be_move_done = ctl_config_move_done; + + ctl_datamove((union ctl_io *)ctsio); + return (retval); +} + +int +ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio) +{ + struct ctl_lun *lun; + struct scsi_receive_copy_status_lid1 *cdb; + struct scsi_receive_copy_status_lid1_data *data; + struct tpc_list *list; + struct tpc_list list_copy; + int retval; + int alloc_len, total_len; + uint32_t list_id; + + CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n")); + + cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb; + lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; + + retval = CTL_RETVAL_COMPLETE; + + list_id = cdb->list_identifier; + mtx_lock(&lun->lun_lock); + TAILQ_FOREACH(list, &lun->tpc_lists, links) { + if ((list->flags & EC_LIST_ID_USAGE_MASK) != + EC_LIST_ID_USAGE_NONE && list->list_id == list_id) + break; + } + if (list == NULL) { + mtx_unlock(&lun->lun_lock); + ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, + /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, + /*bit*/ 0); + ctl_done((union ctl_io *)ctsio); + return (retval); + } + list_copy = *list; + if (list->completed) { + TAILQ_REMOVE(&lun->tpc_lists, list, links); + free(list, M_CTL); + } + mtx_unlock(&lun->lun_lock); + + total_len = sizeof(*data); + alloc_len = scsi_4btoul(cdb->length); + + ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); + + ctsio->kern_sg_entries = 0; + + if (total_len < alloc_len) { + ctsio->residual = alloc_len - total_len; + ctsio->kern_data_len = total_len; + ctsio->kern_total_len = total_len; + } else { + ctsio->residual = 0; + ctsio->kern_data_len = alloc_len; + ctsio->kern_total_len = alloc_len; + } + ctsio->kern_data_resid = 0; + ctsio->kern_rel_offset = 0; + + data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr; + scsi_ulto4b(sizeof(*data) - 4, data->available_data); + if (list_copy.completed) { + if (list_copy.error || list_copy.abort) + data->copy_command_status = RCS_CCS_ERROR; + else + data->copy_command_status = RCS_CCS_COMPLETED; + } else + data->copy_command_status = RCS_CCS_INPROG; + scsi_ulto2b(list_copy.curseg, data->segments_processed); + if (list_copy.curbytes <= UINT32_MAX) { + data->transfer_count_units = RCS_TC_BYTES; + scsi_ulto4b(list_copy.curbytes, data->transfer_count); + } else { + data->transfer_count_units = RCS_TC_MBYTES; + scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count); + } + + ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; + ctsio->be_move_done = ctl_config_move_done; + + ctl_datamove((union ctl_io *)ctsio); + return 
(retval); +} + +int +ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio) +{ + struct ctl_lun *lun; + struct scsi_receive_copy_failure_details *cdb; + struct scsi_receive_copy_failure_details_data *data; + struct tpc_list *list; + struct tpc_list list_copy; + int retval; + int alloc_len, total_len; + uint32_t list_id; + + CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n")); + + cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb; + lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; + + retval = CTL_RETVAL_COMPLETE; + + list_id = cdb->list_identifier; + mtx_lock(&lun->lun_lock); + TAILQ_FOREACH(list, &lun->tpc_lists, links) { + if (list->completed && (list->flags & EC_LIST_ID_USAGE_MASK) != + EC_LIST_ID_USAGE_NONE && list->list_id == list_id) + break; + } + if (list == NULL) { + mtx_unlock(&lun->lun_lock); + ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, + /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, + /*bit*/ 0); + ctl_done((union ctl_io *)ctsio); + return (retval); + } + list_copy = *list; + TAILQ_REMOVE(&lun->tpc_lists, list, links); + free(list, M_CTL); + mtx_unlock(&lun->lun_lock); + + total_len = sizeof(*data) + list_copy.sense_len; + alloc_len = scsi_4btoul(cdb->length); + + ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); + + ctsio->kern_sg_entries = 0; + + if (total_len < alloc_len) { + ctsio->residual = alloc_len - total_len; + ctsio->kern_data_len = total_len; + ctsio->kern_total_len = total_len; + } else { + ctsio->residual = 0; + ctsio->kern_data_len = alloc_len; + ctsio->kern_total_len = alloc_len; + } + ctsio->kern_data_resid = 0; + ctsio->kern_rel_offset = 0; + + data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr; + if (list_copy.completed && (list_copy.error || list_copy.abort)) { + scsi_ulto4b(sizeof(*data) - 4, data->available_data); + data->copy_command_status = RCS_CCS_ERROR; + } else + scsi_ulto4b(0, data->available_data); + scsi_ulto2b(list_copy.sense_len, data->sense_data_length); + memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); + + ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; + ctsio->be_move_done = ctl_config_move_done; + + ctl_datamove((union ctl_io *)ctsio); + return (retval); +} + +int +ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio) +{ + struct ctl_lun *lun; + struct scsi_receive_copy_status_lid4 *cdb; + struct scsi_receive_copy_status_lid4_data *data; + struct tpc_list *list; + struct tpc_list list_copy; + int retval; *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
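
The copy engine itself lives in the truncated portion of ctl_tpc.c.  As a rough
illustration of the approach, a block-to-block segment is processed in bounded
chunks so that a single EXTENDED COPY never stages more than TPC_MAX_IO_SIZE
bytes at a time.  The sketch below is not the committed code: it assumes equal
logical block sizes on both LUNs (with the block size no larger than
TPC_MAX_IO_SIZE) and uses a hypothetical copy_chunk() helper in place of the
real CTL read/write path.

    #include <stdint.h>

    #define TPC_MAX_IO_SIZE (1024 * 1024)   /* same bound as in ctl_tpc.c */

    /*
     * Hypothetical helper: read 'bytes' starting at src_lba and write them
     * starting at dst_lba; stands in for the real CTL I/O machinery.
     */
    int copy_chunk(uint64_t src_lba, uint64_t dst_lba, uint32_t bytes,
        uint32_t blocksize);

    /*
     * Copy 'numbytes' (a multiple of 'blocksize') from one LUN to another
     * in chunks of at most TPC_MAX_IO_SIZE bytes.
     */
    static int
    copy_b2b_segment(uint64_t src_lba, uint64_t dst_lba, uint64_t numbytes,
        uint32_t blocksize)
    {
            uint64_t done;
            uint32_t chunk;

            for (done = 0; done < numbytes; done += chunk) {
                    if (numbytes - done > TPC_MAX_IO_SIZE)
                            chunk = TPC_MAX_IO_SIZE -
                                (TPC_MAX_IO_SIZE % blocksize);
                    else
                            chunk = (uint32_t)(numbytes - done);
                    if (copy_chunk(src_lba + done / blocksize,
                        dst_lba + done / blocksize, chunk, blocksize) != 0)
                            return (-1);
            }
            return (0);
    }

The real code additionally handles differing source and destination block sizes
and records progress per list, which is what the RECEIVE COPY STATUS handlers
shown above report back (curbytes feeding the transfer_count field).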