Skip site navigation (1)Skip section navigation (2)
Date:      Fri, 14 Dec 2018 08:02:44 +0000 (UTC)
From:      Kashyap D Desai <kadesai@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r342064 - head/sys/dev/mrsas
Message-ID:  <201812140802.wBE82ip3087646@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: kadesai
Date: Fri Dec 14 08:02:44 2018
New Revision: 342064
URL: https://svnweb.freebsd.org/changeset/base/342064

Log:
  To improve RAID 1/10 Write performance, OS drivers need to issue the
  required Write IOs as Fast Path IOs (after the appropriate checks
  allowing Fast Path to be used) to the appropriate physical drives
  (translated from the OS logical IO) and wait for all Write IOs to complete.
  
  Design: A write IO on RAID volume will be examined if it can be sent in
  Fast Path based on IO size and starting LBA and ending LBA falling on to
  a Physical Drive boundary. If the underlying RAID volume is a RAID 1/10,
  driver issues two fast path write IOs, one for each corresponding physical
  drive, after computing the corresponding start LBA for each physical drive.
  Both write IOs will have the same payload and are posted to HW such that
  replies land in the same reply queue.
  
  If there are no resources available for sending two IOs, the driver will send
  the original IO from the upper layer to the RAID volume through the Firmware.
  
  When both IOs are completed by HW, the resources will be released
  and SCSI IO completion handler will be called.
  
  Submitted by: Sumit Saxena <sumit.saxena@broadcom.com>
  Reviewed by:  Kashyap Desai <Kashyap.Desai@broadcom.com>
  Approved by:  ken
  MFC after:  3 days
  Sponsored by:   Broadcom Inc

Modified:
  head/sys/dev/mrsas/mrsas.c
  head/sys/dev/mrsas/mrsas.h
  head/sys/dev/mrsas/mrsas_cam.c
  head/sys/dev/mrsas/mrsas_fp.c

Modified: head/sys/dev/mrsas/mrsas.c
==============================================================================
--- head/sys/dev/mrsas/mrsas.c	Fri Dec 14 08:01:49 2018	(r342063)
+++ head/sys/dev/mrsas/mrsas.c	Fri Dec 14 08:02:44 2018	(r342064)
@@ -141,9 +141,6 @@ void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
 void
 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
     struct mrsas_mfi_cmd *cmd, u_int8_t status);
-void
-mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
-    u_int8_t extStatus);
 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
 
 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
@@ -167,7 +164,13 @@ mrsas_get_request_desc(struct mrsas_softc *sc,
 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
+void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
 
+void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
+	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
+	u_int32_t data_length, u_int8_t *sense);
+
+
 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
 
 /*
@@ -1125,7 +1128,7 @@ void
 mrsas_free_mem(struct mrsas_softc *sc)
 {
 	int i;
-	u_int32_t max_cmd;
+	u_int32_t max_fw_cmds;
 	struct mrsas_mfi_cmd *mfi_cmd;
 	struct mrsas_mpt_cmd *mpt_cmd;
 
@@ -1227,9 +1230,9 @@ mrsas_free_mem(struct mrsas_softc *sc)
 	/*
 	 * Free MPT internal command list
 	 */
-	max_cmd = sc->max_fw_cmds;
+	max_fw_cmds = sc->max_fw_cmds;
 	if (sc->mpt_cmd_list) {
-		for (i = 0; i < max_cmd; i++) {
+		for (i = 0; i < max_fw_cmds; i++) {
 			mpt_cmd = sc->mpt_cmd_list[i];
 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
 			free(sc->mpt_cmd_list[i], M_MRSAS);
@@ -1569,14 +1572,14 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t M
 	Mpi2ReplyDescriptorsUnion_t *desc;
 	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
 	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
-	struct mrsas_mpt_cmd *cmd_mpt;
+	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
 	struct mrsas_mfi_cmd *cmd_mfi;
-	u_int8_t reply_descript_type;
+	u_int8_t reply_descript_type, *sense;
 	u_int16_t smid, num_completed;
 	u_int8_t status, extStatus;
 	union desc_value desc_val;
 	PLD_LOAD_BALANCE_INFO lbinfo;
-	u_int32_t device_id;
+	u_int32_t device_id, data_length;
 	int threshold_reply_count = 0;
 #if TM_DEBUG
 	MR_TASK_MANAGE_REQUEST *mr_tm_req;
@@ -1606,6 +1609,8 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t M
 
 		status = scsi_io_req->RaidContext.raid_context.status;
 		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
+		sense = cmd_mpt->sense;
+		data_length = scsi_io_req->DataLength;
 
 		switch (scsi_io_req->Function) {
 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -1621,16 +1626,45 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t M
 		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
 			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
 			lbinfo = &sc->load_balance_info[device_id];
+			/* R1 load balancing for READ */
 			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
 				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
 				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
 			}
 			/* Fall thru and complete IO */
 		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
-			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
-			mrsas_cmd_done(sc, cmd_mpt);
-			scsi_io_req->RaidContext.raid_context.status = 0;
-			scsi_io_req->RaidContext.raid_context.exStatus = 0;
+			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
+				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
+				    extStatus, data_length, sense);
+				mrsas_cmd_done(sc, cmd_mpt);
+			} else {
+				/*
+				 * If the peer  Raid  1/10 fast path failed,
+				 * mark IO as failed to the scsi layer.
+				 * Overwrite the current status by the failed status
+				 * and make sure that if any command fails,
+				 * driver returns fail status to CAM.
+				 */
+				cmd_mpt->cmd_completed = 1;
+				r1_cmd = cmd_mpt->peer_cmd;
+				if (r1_cmd->cmd_completed) {
+					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
+						status = r1_cmd->io_request->RaidContext.raid_context.status;
+						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
+						data_length = r1_cmd->io_request->DataLength;
+						sense = r1_cmd->sense;
+					}
+					r1_cmd->ccb_ptr = NULL;
+					if (r1_cmd->callout_owner) {
+						callout_stop(&r1_cmd->cm_callout);
+						r1_cmd->callout_owner  = false;
+					}
+					mrsas_release_mpt_cmd(r1_cmd);
+					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
+					    extStatus, data_length, sense);
+					mrsas_cmd_done(sc, cmd_mpt);
+				}
+			}
 			mrsas_atomic_dec(&sc->fw_outstanding);
 			break;
 		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
@@ -1723,40 +1757,41 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t M
  * CCB.
  */
 void
-mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
+mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
+    u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
 {
 	struct mrsas_softc *sc = cmd->sc;
 	u_int8_t *sense_data;
 
 	switch (status) {
 	case MFI_STAT_OK:
-		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
+		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
 		break;
 	case MFI_STAT_SCSI_IO_FAILED:
 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
-		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
-		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
+		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
 		if (sense_data) {
 			/* For now just copy 18 bytes back */
-			memcpy(sense_data, cmd->sense, 18);
-			cmd->ccb_ptr->csio.sense_len = 18;
-			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
+			memcpy(sense_data, sense, 18);
+			ccb_ptr->csio.sense_len = 18;
+			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
 		}
 		break;
 	case MFI_STAT_LD_OFFLINE:
 	case MFI_STAT_DEVICE_NOT_FOUND:
-		if (cmd->ccb_ptr->ccb_h.target_lun)
-			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
+		if (ccb_ptr->ccb_h.target_lun)
+			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
 		else
-			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
+			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
 		break;
 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
-		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
+		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
 		break;
 	default:
 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
-		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
-		cmd->ccb_ptr->csio.scsi_status = status;
+		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
+		ccb_ptr->csio.scsi_status = status;
 	}
 	return;
 }
@@ -1943,6 +1978,7 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
 		return (ENOMEM);
 	}
+
 	/*
 	 * Allocate for Event detail structure
 	 */
@@ -2383,25 +2419,26 @@ int
 mrsas_init_adapter(struct mrsas_softc *sc)
 {
 	uint32_t status;
-	u_int32_t max_cmd, scratch_pad_2;
+	u_int32_t scratch_pad_2;
 	int ret;
 	int i = 0;
 
 	/* Read FW status register */
 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
 
-	/* Get operational params from status register */
 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
 
 	/* Decrement the max supported by 1, to correlate with FW */
 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
-	max_cmd = sc->max_fw_cmds;
+	sc->max_scsi_cmds = sc->max_fw_cmds -
+	    (MRSAS_FUSION_INT_CMDS + MRSAS_MAX_IOCTL_CMDS);
 
 	/* Determine allocation size of command frames */
-	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
-	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
+	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
+	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
-	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
+	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
 	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
 	    outbound_scratch_pad_2));
 	/*
@@ -2419,15 +2456,17 @@ mrsas_init_adapter(struct mrsas_softc *sc)
 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
 		    * MEGASAS_256K_IO;
 
-	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
+	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
 
 	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
 
-	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
-	    sc->max_num_sge, sc->max_chain_frame_sz);
+	mrsas_dprint(sc, MRSAS_INFO,
+	    "max sge: 0x%x, max chain frame size: 0x%x, "
+	    "max fw cmd: 0x%x\n", sc->max_num_sge,
+	    sc->max_chain_frame_sz, sc->max_fw_cmds);
 
 	/* Used for pass thru MFI frame (DCMD) */
 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
@@ -2630,14 +2669,14 @@ int
 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
 {
 	int i, j;
-	u_int32_t max_cmd, count;
+	u_int32_t max_fw_cmds, count;
 	struct mrsas_mpt_cmd *cmd;
 	pMpi2ReplyDescriptorsUnion_t reply_desc;
 	u_int32_t offset, chain_offset, sense_offset;
 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
 
-	max_cmd = sc->max_fw_cmds;
+	max_fw_cmds = sc->max_fw_cmds;
 
 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
 	if (!sc->req_desc) {
@@ -2651,13 +2690,14 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
 	 * Allocate the dynamic array first and then allocate individual
 	 * commands.
 	 */
-	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
+	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
+	    M_MRSAS, M_NOWAIT);
 	if (!sc->mpt_cmd_list) {
 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
 		return (ENOMEM);
 	}
-	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
-	for (i = 0; i < max_cmd; i++) {
+	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
+	for (i = 0; i < max_fw_cmds; i++) {
 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
 		    M_MRSAS, M_NOWAIT);
 		if (!sc->mpt_cmd_list[i]) {
@@ -2675,7 +2715,7 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
 	sense_base = (u_int8_t *)sc->sense_mem;
 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
-	for (i = 0; i < max_cmd; i++) {
+	for (i = 0; i < max_fw_cmds; i++) {
 		cmd = sc->mpt_cmd_list[i];
 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
 		chain_offset = sc->max_chain_frame_sz * i;
@@ -2683,6 +2723,7 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
 		cmd->index = i + 1;
 		cmd->ccb_ptr = NULL;
+		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
 		cmd->sc = sc;
@@ -3034,13 +3075,27 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t rese
 		mtx_unlock(&sc->sim_lock);
 		for (i = 0; i < sc->max_fw_cmds; i++) {
 			mpt_cmd = sc->mpt_cmd_list[i];
+
+			if (mpt_cmd->peer_cmd) {
+				mrsas_dprint(sc, MRSAS_OCR,
+				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
+				    i, mpt_cmd, mpt_cmd->peer_cmd);
+			}
+
 			if (mpt_cmd->ccb_ptr) {
-				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
-				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
-				mrsas_cmd_done(sc, mpt_cmd);
-				mrsas_atomic_dec(&sc->fw_outstanding);
+				if (mpt_cmd->callout_owner) {
+					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
+					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+					mrsas_cmd_done(sc, mpt_cmd);
+				} else {
+					mpt_cmd->ccb_ptr = NULL;
+					mrsas_release_mpt_cmd(mpt_cmd);
+				}
 			}
 		}
+
+		mrsas_atomic_set(&sc->fw_outstanding, 0);
+
 		mtx_lock(&sc->sim_lock);
 
 		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,

Modified: head/sys/dev/mrsas/mrsas.h
==============================================================================
--- head/sys/dev/mrsas/mrsas.h	Fri Dec 14 08:01:49 2018	(r342063)
+++ head/sys/dev/mrsas/mrsas.h	Fri Dec 14 08:02:44 2018	(r342064)
@@ -672,6 +672,12 @@ typedef union {
 #define	mrsas_atomic_dec(v)	atomic_subtract_int(&(v)->val, 1)
 #define	mrsas_atomic_inc(v)	atomic_add_int(&(v)->val, 1)
 
+static inline int
+mrsas_atomic_inc_return(mrsas_atomic_t *v)
+{
+	return 1 + atomic_fetchadd_int(&(v)->val, 1);
+}
+
 /* IOCInit Request message */
 typedef struct _MPI2_IOC_INIT_REQUEST {
 	u_int8_t WhoInit;		/* 0x00 */
@@ -707,6 +713,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest
  * MR private defines
  */
 #define	MR_PD_INVALID			0xFFFF
+#define	MR_DEVHANDLE_INVALID	0xFFFF
 #define	MAX_SPAN_DEPTH			8
 #define	MAX_QUAD_DEPTH			MAX_SPAN_DEPTH
 #define	MAX_RAIDMAP_SPAN_DEPTH	(MAX_SPAN_DEPTH)
@@ -1019,6 +1026,7 @@ struct IO_REQUEST_INFO {
 	u_int8_t span_arm;
 	u_int8_t pd_after_lb;
 	boolean_t raCapable;
+	u_int16_t r1_alt_dev_handle;
 };
 
 /*
@@ -1528,6 +1536,7 @@ enum MR_EVT_ARGS {
 #define	MR_RL_FLAGS_GRANT_DESTINATION_CPU1			0x10
 #define	MR_RL_FLAGS_GRANT_DESTINATION_CUDA			0x80
 #define	MR_RL_FLAGS_SEQ_NUM_ENABLE					0x8
+#define	MR_RL_WRITE_THROUGH_MODE					0x00
 #define	MR_RL_WRITE_BACK_MODE						0x01
 
 /*
@@ -1591,6 +1600,7 @@ typedef enum _REGION_TYPE {
 #define	MRSAS_SCSI_MAX_CDB_LEN			16
 #define	MRSAS_SCSI_SENSE_BUFFERSIZE		96
 #define	MRSAS_INTERNAL_CMDS				32
+#define	MRSAS_FUSION_INT_CMDS			8
 
 #define	MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK	0x400000
 #define	MEGASAS_MAX_CHAIN_SIZE_MASK		0x3E0
@@ -1662,6 +1672,10 @@ struct mrsas_mpt_cmd {
 	struct callout cm_callout;
 	struct mrsas_softc *sc;
 	boolean_t tmCapable;
+	u_int16_t r1_alt_dev_handle;
+	boolean_t cmd_completed;
+	struct mrsas_mpt_cmd *peer_cmd;
+	bool	callout_owner;
 	TAILQ_ENTRY(mrsas_mpt_cmd) next;
 };
 
@@ -2988,6 +3002,7 @@ struct mrsas_softc {
 
 	struct sema ioctl_count_sema;
 	uint32_t max_fw_cmds;
+	uint16_t max_scsi_cmds;
 	uint32_t max_num_sge;
 	struct resource *mrsas_irq[MAX_MSIX_COUNT];
 	void   *intr_handle[MAX_MSIX_COUNT];
@@ -3063,6 +3078,7 @@ struct mrsas_softc {
 	u_int32_t max_sectors_per_req;
 	u_int32_t disableOnlineCtrlReset;
 	mrsas_atomic_t fw_outstanding;
+
 	u_int32_t mrsas_debug;
 	u_int32_t mrsas_io_timeout;
 	u_int32_t mrsas_fw_fault_check_delay;

Modified: head/sys/dev/mrsas/mrsas_cam.c
==============================================================================
--- head/sys/dev/mrsas/mrsas_cam.c	Fri Dec 14 08:01:49 2018	(r342063)
+++ head/sys/dev/mrsas/mrsas_cam.c	Fri Dec 14 08:02:44 2018	(r342064)
@@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/kernel.h>
 
-
 #include <sys/time.h>			/* XXX for pcpu.h */
 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
 
@@ -110,9 +109,6 @@ struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_s
 MRSAS_REQUEST_DESCRIPTOR_UNION *
 	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
 
-extern void
-mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
-    u_int8_t extStatus);
 extern int mrsas_reset_targets(struct mrsas_softc *sc);
 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
 extern u_int32_t
@@ -129,15 +125,13 @@ MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
 extern u_int16_t 
 mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
     PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
-extern u_int8_t
-megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
-    u_int64_t block, u_int32_t count);
 extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
 extern void mrsas_disable_intr(struct mrsas_softc *sc);
 extern void mrsas_enable_intr(struct mrsas_softc *sc);
+void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
+    struct mrsas_mpt_cmd *cmd);
 
-
 /*
  * mrsas_cam_attach:	Main entry to CAM subsystem
  * input:				Adapter instance soft state
@@ -152,7 +146,7 @@ mrsas_cam_attach(struct mrsas_softc *sc)
 	struct cam_devq *devq;
 	int mrsas_cam_depth;
 
-	mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+	mrsas_cam_depth = sc->max_scsi_cmds;
 
 	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
 		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
@@ -452,7 +446,7 @@ static int32_t
 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
     union ccb *ccb)
 {
-	struct mrsas_mpt_cmd *cmd;
+	struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
 	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
 	struct ccb_scsiio *csio = &(ccb->csio);
 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
@@ -472,6 +466,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 		xpt_done(ccb);
 		return (0);
 	}
+
 	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
 		if (ccb_h->flags & CAM_DIR_IN)
 			cmd->flags |= MRSAS_DIR_IN;
@@ -572,6 +567,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
 			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
 			mtx_unlock(&sc->raidmap_lock);
+			mrsas_release_mpt_cmd(cmd);
 			return (1);
 		}
 		break;
@@ -580,6 +576,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
 			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
 			mtx_unlock(&sc->raidmap_lock);
+			mrsas_release_mpt_cmd(cmd);
 			return (1);
 		}
 		break;
@@ -592,6 +589,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 				device_printf(sc->mrsas_dev,
 				    "Build SYSPDIO failed.\n");
 				mtx_unlock(&sc->raidmap_lock);
+				mrsas_release_mpt_cmd(cmd);
 				return (1);
 			}
 		} else {
@@ -600,6 +598,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 				device_printf(sc->mrsas_dev,
 				    "Build SYSPDIO failed.\n");
 				mtx_unlock(&sc->raidmap_lock);
+				mrsas_release_mpt_cmd(cmd);
 				return (1);
 			}
 		}
@@ -622,6 +621,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 	/*
 	 * Start timer for IO timeout. Default timeout value is 90 second.
 	 */
+	cmd->callout_owner = true;
 #if (__FreeBSD_version >= 1000510)
 	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
 	    mrsas_scsiio_timeout, cmd, 0);
@@ -629,12 +629,34 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *
 	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
 	    mrsas_scsiio_timeout, cmd);
 #endif
-	mrsas_atomic_inc(&sc->fw_outstanding);
 
-	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
+	if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->io_cmds_highwater)
 		sc->io_cmds_highwater++;
 
-	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+	/*
+	 *  if it is raid 1/10 fp write capable.
+	 *  try to get second command from pool and construct it.
+	 *  From FW, it has confirmed that lba values of two PDs corresponds to
+	 *  single R1/10 LD are always same
+	 *
+	 */
+	/*
+	 * driver side count always should be less than max_fw_cmds to get
+	 * new command
+	 */
+	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+		mrsas_atomic_inc(&sc->fw_outstanding);
+		mrsas_prepare_secondRaid1_IO(sc, cmd);
+		mrsas_fire_cmd(sc, req_desc->addr.u.low,
+			req_desc->addr.u.high);
+		r1_cmd = cmd->peer_cmd;
+		mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
+				r1_cmd->request_desc->addr.u.high);
+	} else {
+		mrsas_fire_cmd(sc, req_desc->addr.u.low,
+			req_desc->addr.u.high);
+	}
+
 	return (0);
 
 done:
@@ -698,7 +720,6 @@ mrsas_get_mpt_cmd(struct mrsas_softc *sc)
 	cmd->error_code = 0;
 	cmd->load_balance = 0;
 	cmd->ccb_ptr = NULL;
-
 out:
 	mtx_unlock(&sc->mpt_cmd_pool_lock);
 	return cmd;
@@ -716,7 +737,12 @@ mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
 	struct mrsas_softc *sc = cmd->sc;
 
 	mtx_lock(&sc->mpt_cmd_pool_lock);
+	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+	cmd->peer_cmd = NULL;
+	cmd->cmd_completed = 0;
+	memset((uint8_t *)cmd->io_request, 0,
+		sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
 	TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
 	mtx_unlock(&sc->mpt_cmd_pool_lock);
 
@@ -735,15 +761,65 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16
 {
 	u_int8_t *p;
 
-	if (index >= sc->max_fw_cmds) {
-		device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
-		return NULL;
-	}
+	KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
 	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
 
 	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
 }
 
+
+
+
+/* mrsas_prepare_secondRaid1_IO
+ * It prepares the raid 1 second IO
+ */
+void
+mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
+    struct mrsas_mpt_cmd *cmd)
+{
+	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
+	struct mrsas_mpt_cmd *r1_cmd;
+
+	r1_cmd = cmd->peer_cmd;
+	req_desc = cmd->request_desc;
+
+	/*
+	 * copy the io request frame as well as 8 SGEs data for r1
+	 * command
+	 */
+	memcpy(r1_cmd->io_request, cmd->io_request,
+	    (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
+	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+	    (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
+
+	/* sense buffer is different for r1 command */
+	r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr;
+	r1_cmd->ccb_ptr = cmd->ccb_ptr;
+
+	req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
+	req_desc2->addr.Words = 0;
+	r1_cmd->request_desc = req_desc2;
+	req_desc2->SCSIIO.SMID = r1_cmd->index;
+	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
+	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
+	r1_cmd->r1_alt_dev_handle =  cmd->io_request->DevHandle;
+	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
+	cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
+	    r1_cmd->index;
+	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
+		cmd->index;
+	/*
+	 * MSIxIndex of both commands request descriptors
+	 * should be same
+	 */
+	r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
+	/* span arm is different for r1 cmd */
+	r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
+	    cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
+
+}
+
+
 /*
  * mrsas_build_ldio_rw:	Builds an LDIO command
  * input:				Adapter instance soft state
@@ -883,11 +959,15 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mp
 	struct ccb_scsiio *csio = &(ccb->csio);
 	struct IO_REQUEST_INFO io_info;
 	MR_DRV_RAID_MAP_ALL *map_ptr;
+	struct mrsas_mpt_cmd *r1_cmd = NULL;
+
 	MR_LD_RAID *raid;
 	u_int8_t fp_possible;
 	u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
 	u_int32_t datalength = 0;
 
+	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
+
 	start_lba_lo = 0;
 	start_lba_hi = 0;
 	fp_possible = 0;
@@ -947,7 +1027,10 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mp
 	io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
 	io_info.numBlocks = datalength;
 	io_info.ldTgtId = device_id;
+	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 
+	io_request->DataLength = cmd->length;
+
 	switch (ccb_h->flags & CAM_DIR_MASK) {
 	case CAM_DIR_IN:
 		io_info.isRead = 1;
@@ -980,14 +1063,36 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mp
 	cmd->request_desc->SCSIIO.MSIxIndex =
 	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
 
-	if (sc->is_ventura && sc->streamDetectByLD) {
-		mtx_lock(&sc->stream_lock);
-		mrsas_stream_detect(sc, cmd, &io_info);
-		mtx_unlock(&sc->stream_lock);
-		/* In ventura if stream detected for a read and it is read ahead capable make this IO as LDIO */
-		if (io_request->RaidContext.raid_context_g35.streamDetected &&
-				io_info.isRead && io_info.raCapable)
-			fp_possible = FALSE;
+	if (sc->is_ventura) {
+		if (sc->streamDetectByLD) {
+			mtx_lock(&sc->stream_lock);
+			mrsas_stream_detect(sc, cmd, &io_info);
+			mtx_unlock(&sc->stream_lock);
+			/* In ventura if stream detected for a read and
+			 * it is read ahead capable make this IO as LDIO */
+			if (io_request->RaidContext.raid_context_g35.streamDetected &&
+					io_info.isRead && io_info.raCapable)
+				fp_possible = FALSE;
+		}
+
+		/* Set raid 1/10 fast path write capable bit in io_info.
+		 * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible
+		 * disabled after this point. Try not to add more check for
+		 * fp_possible toggle after this.
+		 */
+		if (fp_possible &&
+				(io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
+				(raid->level == 1) && !io_info.isRead) {
+			r1_cmd = mrsas_get_mpt_cmd(sc);
+			if (!r1_cmd) {
+				fp_possible = FALSE;
+				printf("Avago debug fp disable from %s %d \n",
+					__func__, __LINE__);
+			} else {
+				cmd->peer_cmd = r1_cmd;
+				r1_cmd->peer_cmd = cmd;
+			}
+		}
 	}
 
 	if (fp_possible) {
@@ -1032,6 +1137,12 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mp
 				io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
 		} else
 			cmd->load_balance = 0;
+
+		if (sc->is_ventura)
+				cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
+		else
+				cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+
 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
 		io_request->DevHandle = io_info.devHandle;
 	} else {
@@ -1078,6 +1189,7 @@ mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct 
 	u_int32_t device_id, ld;
 	MR_DRV_RAID_MAP_ALL *map_ptr;
 	MR_LD_RAID *raid;
+	RAID_CONTEXT *pRAID_Context;
 	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
 
 	io_request = cmd->io_request;
@@ -1086,6 +1198,8 @@ mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct 
 	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
 	ld = MR_TargetIdToLdGet(device_id, map_ptr);
 	raid = MR_LdRaidGet(ld, map_ptr);
+	/* get RAID_Context pointer */
+	pRAID_Context = &io_request->RaidContext.raid_context;
 	/* Store the TM capability value in cmd */
 	cmd->tmCapable = raid->capability.tmCapable;
 
@@ -1140,9 +1254,12 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrs
 	u_int32_t device_id;
 	MR_DRV_RAID_MAP_ALL *local_map_ptr;
 	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+	RAID_CONTEXT *pRAID_Context;
 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
 
 	io_request = cmd->io_request;
+	/* get RAID_Context pointer */
+	pRAID_Context = &io_request->RaidContext.raid_context;
 	device_id = ccb_h->target_id;
 	local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
 	io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
@@ -1171,6 +1288,9 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrs
 		else
 			io_request->RaidContext.raid_context.regLockFlags |=
 			    (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+		/* raid_context.Type = MPI2_TYPE_CUDA is valid only,
+		 * if FW support Jbod Sequence number
+		 */
 		io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
 		io_request->RaidContext.raid_context.nseg = 0x1;
 	} else if (sc->fast_path_io) {
@@ -1185,7 +1305,7 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrs
 		/* Want to send all IO via FW path */
 		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
 		io_request->RaidContext.raid_context.configSeqNum = 0;
-		io_request->DevHandle = 0xFFFF;
+		io_request->DevHandle = MR_DEVHANDLE_INVALID;
 	}
 
 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
@@ -1432,7 +1552,10 @@ mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mp
 	mrsas_unmap_request(sc, cmd);
 	
 	mtx_lock(&sc->sim_lock);
-	callout_stop(&cmd->cm_callout);
+	if (cmd->callout_owner) {
+		callout_stop(&cmd->cm_callout);
+		cmd->callout_owner  = false;
+	}
 	xpt_done(cmd->ccb_ptr);
 	cmd->ccb_ptr = NULL;
 	mtx_unlock(&sc->sim_lock);

Modified: head/sys/dev/mrsas/mrsas_fp.c
==============================================================================
--- head/sys/dev/mrsas/mrsas_fp.c	Fri Dec 14 08:01:49 2018	(r342063)
+++ head/sys/dev/mrsas/mrsas_fp.c	Fri Dec 14 08:02:44 2018	(r342064)
@@ -921,7 +921,7 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_in
     RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
 {
 	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
-	u_int32_t pd, arRef;
+	u_int32_t pd, arRef, r1_alt_pd;
 	u_int8_t physArm, span;
 	u_int64_t row;
 	u_int8_t retval = TRUE;
@@ -950,10 +950,16 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_in
 	arRef = MR_LdSpanArrayGet(ld, span, map);
 	pd = MR_ArPdGet(arRef, physArm, map);
 
-	if (pd != MR_PD_INVALID)
+	if (pd != MR_PD_INVALID) {
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
-	else {
-		*pDevHandle = MR_PD_INVALID;
+		/* get second pd also for raid 1/10 fast path writes */
+		if ((raid->level == 1) && !io_info->isRead) {
+			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (r1_alt_pd != MR_PD_INVALID)
+				io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
+		}
+	} else {
+		*pDevHandle = MR_DEVHANDLE_INVALID;
 		if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
 			(sc->mrsas_gen3_ctrl &&
 			raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
@@ -1167,7 +1173,7 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_
 		    MR_GetPhyParams(sc, ld, start_strip,
 		    ref_in_start_stripe, io_info, pRAID_Context, map);
 		/* If IO on an invalid Pd, then FP is not possible */
-		if (io_info->devHandle == MR_PD_INVALID)
+		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
 			io_info->fpOkForIo = FALSE;
 		/*
 		 * if FP possible, set the SLUD bit in regLockFlags for
@@ -1178,6 +1184,7 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_
 		    raid->capability.fpCacheBypassCapable) {
 			((RAID_CONTEXT_G35 *) pRAID_Context)->routingFlags.bits.sld = 1;
 		}
+
 		return retval;
 	} else if (isRead) {
 		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
@@ -1535,6 +1542,7 @@ mrsas_get_best_arm_pd(struct mrsas_softc *sc,
 {
 	MR_LD_RAID *raid;
 	MR_DRV_RAID_MAP_ALL *drv_map;
+	u_int16_t pd1_devHandle;
 	u_int16_t pend0, pend1, ld;
 	u_int64_t diff0, diff1;
 	u_int8_t bestArm, pd0, pd1, span, arm;
@@ -1558,23 +1566,30 @@ mrsas_get_best_arm_pd(struct mrsas_softc *sc,
 	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
 	    (arm + 1 - span_row_size) : arm + 1, drv_map);
 
-	/* get the pending cmds for the data and mirror arms */
-	pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
-	pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+	/* Get PD1 Dev Handle */
+	pd1_devHandle = MR_PdDevHandleGet(pd1, drv_map);
+	if (pd1_devHandle == MR_DEVHANDLE_INVALID) {
+		bestArm = arm;
+	} else {
+		/* get the pending cmds for the data and mirror arms */
+		pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+		pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
 
-	/* Determine the disk whose head is nearer to the req. block */
-	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
-	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
-	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+		/* Determine the disk whose head is nearer to the req. block */
+		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
 
-	if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
-	    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
-		bestArm ^= 1;
+		if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
+		    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
+			bestArm ^= 1;
 
-	/* Update the last accessed block on the correct pd */
+		/* Update the last accessed block on the correct pd */
+		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+	}
+
 	lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
-	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
-	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
 #if SPAN_DEBUG
 	if (arm != bestArm)
 		printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
@@ -1631,7 +1646,7 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
     RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
 {
 	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
-	u_int32_t pd, arRef;
+	u_int32_t pd, arRef, r1_alt_pd;
 	u_int8_t physArm, span;
 	u_int64_t row;
 	u_int8_t retval = TRUE;
@@ -1673,11 +1688,17 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
 
 	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */
 
-	if (pd != MR_PD_INVALID)
+	if (pd != MR_PD_INVALID) {
 		/* Get dev handle from Pd */
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
-	else {
-		*pDevHandle = MR_PD_INVALID;	/* set dev handle as invalid. */
+		/* get second pd also for raid 1/10 fast path writes */
+		if ((raid->level == 1) && !io_info->isRead) {
+			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (r1_alt_pd != MR_PD_INVALID)
+				io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
+		}
+	} else {
+		*pDevHandle = MR_DEVHANDLE_INVALID;	/* set dev handle as invalid. */
 		if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
 			(sc->mrsas_gen3_ctrl &&
 			raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201812140802.wBE82ip3087646>