From owner-svn-src-all@freebsd.org Fri Dec 14 07:59:10 2018
Message-Id: <201812140759.wBE7x9cn082757@repo.freebsd.org>
From: Kashyap D Desai <kadesai@FreeBSD.org>
Date: Fri, 14 Dec 2018 07:59:09 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r342060 - head/sys/dev/mrsas
X-SVN-Group: head
X-SVN-Commit-Author: kadesai
X-SVN-Commit-Paths: head/sys/dev/mrsas
X-SVN-Commit-Revision: 342060
X-SVN-Commit-Repository: base

Author: kadesai
Date: Fri Dec 14 07:59:09 2018
New Revision: 342060
URL: https://svnweb.freebsd.org/changeset/base/342060

Log:
  This patch adds support for the new Dynamic RaidMap, which allows the RAID
  map to have different sizes for different numbers of supported VDs on
  SAS3.5 MegaRAID adapters.
  Submitted by:   Sumit Saxena
  Reviewed by:    Kashyap Desai
  Approved by:    ken
  MFC after:      3 days
  Sponsored by:   Broadcom Inc

Modified:
  head/sys/dev/mrsas/mrsas.c
  head/sys/dev/mrsas/mrsas.h
  head/sys/dev/mrsas/mrsas_fp.c

Modified: head/sys/dev/mrsas/mrsas.c
==============================================================================
--- head/sys/dev/mrsas/mrsas.c  Fri Dec 14 07:57:00 2018        (r342059)
+++ head/sys/dev/mrsas/mrsas.c  Fri Dec 14 07:59:09 2018        (r342060)
@@ -1757,8 +1757,8 @@ mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_
 static int
 mrsas_alloc_mem(struct mrsas_softc *sc)
 {
-    u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
-        chain_frame_size, evt_detail_size, count;
+    u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
+        evt_detail_size, count;
 
     /*
      * Allocate parent DMA tag
@@ -2163,7 +2163,7 @@ mrsas_init_fw(struct mrsas_softc *sc)
     u_int32_t max_sectors_1;
     u_int32_t max_sectors_2;
     u_int32_t tmp_sectors;
-    u_int32_t scratch_pad_2;
+    u_int32_t scratch_pad_2, scratch_pad_3;
     int msix_enable = 0;
     int fw_msix_count = 0;
 
@@ -2172,6 +2172,15 @@ mrsas_init_fw(struct mrsas_softc *sc)
     if (ret != SUCCESS) {
         return (ret);
     }
+    if (sc->is_ventura) {
+        scratch_pad_3 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
+#if VD_EXT_DEBUG
+        device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
+#endif
+        sc->maxRaidMapSize = ((scratch_pad_3 >>
+            MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
+            MR_MAX_RAID_MAP_SIZE_MASK);
+    }
     /* MSI-x index 0- reply post host index register */
     sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
     /* Check if MSI-X is supported while in ready state */
@@ -3395,8 +3404,10 @@ dcmd_timeout:
 static void
 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
 {
+    u_int32_t ventura_map_sz = 0;
     sc->max256vdSupport =
-        sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
+        sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
+    /* Below is additional check to address future FW enhancement */
     if (sc->ctrl_info->max_lds > 64)
         sc->max256vdSupport = 1;
 
@@ -3413,20 +3424,33 @@ mrsas_update_ext_vd_details(struct mrsas_softc *sc)
         sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
     }
 
-    sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
-        (sizeof(MR_LD_SPAN_MAP) *
-        (sc->fw_supported_vd_count - 1));
-    sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
-    sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
-        (sizeof(MR_LD_SPAN_MAP) *
-        (sc->drv_supported_vd_count - 1));
+    if (sc->maxRaidMapSize) {
+        ventura_map_sz = sc->maxRaidMapSize *
+            MR_MIN_MAP_SIZE;
+        sc->current_map_sz = ventura_map_sz;
+        sc->max_map_sz = ventura_map_sz;
+    } else {
+        sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
+            (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
+        sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
+        sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
+        if (sc->max256vdSupport)
+            sc->current_map_sz = sc->new_map_sz;
+        else
+            sc->current_map_sz = sc->old_map_sz;
+    }
 
-    sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
-
-    if (sc->max256vdSupport)
-        sc->current_map_sz = sc->new_map_sz;
-    else
-        sc->current_map_sz = sc->old_map_sz;
+    sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
+#if VD_EXT_DEBUG
+    device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
+        sc->maxRaidMapSize);
+    device_printf(sc->mrsas_dev,
+        "new_map_sz = 0x%x, old_map_sz = 0x%x, "
+        "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
+        "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
+        sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
+        sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
+#endif
 }
 
 /*

Modified: head/sys/dev/mrsas/mrsas.h
==============================================================================
--- head/sys/dev/mrsas/mrsas.h  Fri Dec 14 07:57:00 2018        (r342059)
+++ head/sys/dev/mrsas/mrsas.h  Fri Dec 14 07:59:09 2018        (r342060)
@@ -661,6 +661,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest
 #define MAX_RAIDMAP_ROW_SIZE            (MAX_ROW_SIZE)
 #define MAX_LOGICAL_DRIVES              64
 #define MAX_LOGICAL_DRIVES_EXT          256
+#define MAX_LOGICAL_DRIVES_DYN          512
 #define MAX_RAIDMAP_LOGICAL_DRIVES      (MAX_LOGICAL_DRIVES)
 #define MAX_RAIDMAP_VIEWS               (MAX_LOGICAL_DRIVES)
 
@@ -670,9 +671,11 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest
 
 #define MAX_ARRAYS_EXT                  256
 #define MAX_API_ARRAYS_EXT              MAX_ARRAYS_EXT
+#define MAX_API_ARRAYS_DYN              512
 
 #define MAX_PHYSICAL_DEVICES            256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES    (MAX_PHYSICAL_DEVICES)
+#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN        512
 #define MR_DCMD_LD_MAP_GET_INFO         0x0300e101
 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO  0x0200e102
 #define MR_DCMD_PD_MFI_TASK_MGMT        0x0200e100
@@ -889,9 +892,9 @@ typedef struct _MR_DRV_RAID_MAP {
     u_int16_t spanCount;
     u_int16_t reserve3;
 
-    MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
-    u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
-    MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+    MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
+    u_int16_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
+    MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
     MR_LD_SPAN_MAP ldSpanMap[1];
 } MR_DRV_RAID_MAP;
 
@@ -905,7 +908,7 @@ typedef struct _MR_DRV_RAID_MAP {
 typedef struct _MR_DRV_RAID_MAP_ALL {
     MR_DRV_RAID_MAP raidMap;
 
-    MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+    MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
 } MR_DRV_RAID_MAP_ALL;
 
 #pragma pack()
@@ -988,6 +991,82 @@ typedef struct _MR_LD_TARGET_SYNC {
     u_int16_t seqNum;
 } MR_LD_TARGET_SYNC;
+
+/*
+ * RAID Map descriptor Types.
+ * Each element should uniquely idetify one data structure in the RAID map
+ */
+typedef enum _MR_RAID_MAP_DESC_TYPE {
+    RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0, /* MR_DEV_HANDLE_INFO data */
+    RAID_MAP_DESC_TYPE_TGTID_INFO = 1,  /* target to Ld num Index map */
+    RAID_MAP_DESC_TYPE_ARRAY_INFO = 2,  /* MR_ARRAY_INFO data */
+    RAID_MAP_DESC_TYPE_SPAN_INFO = 3,   /* MR_LD_SPAN_MAP data */
+    RAID_MAP_DESC_TYPE_COUNT,
+} MR_RAID_MAP_DESC_TYPE;
+
+/*
+ * This table defines the offset, size and num elements of each descriptor
+ * type in the RAID Map buffer
+ */
+typedef struct _MR_RAID_MAP_DESC_TABLE {
+    /* Raid map descriptor type */
+    u_int32_t raidMapDescType;
+    /* Offset into the RAID map buffer where descriptor data is saved */
+    u_int32_t raidMapDescOffset;
+    /* total size of the descriptor buffer */
+    u_int32_t raidMapDescBufferSize;
+    /* Number of elements contained in the descriptor buffer */
+    u_int32_t raidMapDescElements;
+} MR_RAID_MAP_DESC_TABLE;
+
+/*
+ * Dynamic Raid Map Structure.
+ */
+typedef struct _MR_FW_RAID_MAP_DYNAMIC {
+    u_int32_t raidMapSize;
+    u_int32_t descTableOffset;
+    u_int32_t descTableSize;
+    u_int32_t descTableNumElements;
+    u_int64_t PCIThresholdBandwidth;
+    u_int32_t reserved2[3];
+
+    u_int8_t fpPdIoTimeoutSec;
+    u_int8_t reserved3[3];
+    u_int32_t rmwFPSeqNum;
+    u_int16_t ldCount;
+    u_int16_t arCount;
+    u_int16_t spanCount;
+    u_int16_t reserved4[3];
+
+    /*
+     * The below structure of pointers is only to be used by the driver.
+     * This is added in the API to reduce the amount of code changes needed in
+     * the driver to support dynamic RAID map.
+     * Firmware should not update these pointers while preparing the raid map
+     */
+    union {
+        struct {
+            MR_DEV_HANDLE_INFO *devHndlInfo;
+            u_int16_t *ldTgtIdToLd;
+            MR_ARRAY_INFO *arMapInfo;
+            MR_LD_SPAN_MAP *ldSpanMap;
+        } ptrStruct;
+        u_int64_t ptrStructureSize[RAID_MAP_DESC_TYPE_COUNT];
+    } RaidMapDescPtrs;
+
+    /*
+     * RAID Map descriptor table defines the layout of data in the RAID Map.
+     * The size of the descriptor table itself could change.
+     */
+
+    /* Variable Size descriptor Table. */
+    MR_RAID_MAP_DESC_TABLE raidMapDescTable[RAID_MAP_DESC_TYPE_COUNT];
+    /* Variable Size buffer containing all data */
+    u_int32_t raidMapDescData[1];
+
+} MR_FW_RAID_MAP_DYNAMIC;
+
+
 
 #define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
 #define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
 #define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
@@ -1014,6 +1093,11 @@ struct mrsas_tmp_dcmd {
     bus_addr_t tmp_dcmd_phys_addr;
 };
 
+#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT   16
+#define MR_MAX_RAID_MAP_SIZE_MASK           0x1FF
+#define MR_MIN_MAP_SIZE                     0x10000
+
+
 /*******************************************************************
  * Register set, included legacy controllers 1068 and 1078,
  * structure extended for 1078 registers
@@ -1053,8 +1137,9 @@ typedef struct _mrsas_register_set {
 
     u_int32_t outbound_scratch_pad;     /* 00B0h */
     u_int32_t outbound_scratch_pad_2;   /* 00B4h */
+    u_int32_t outbound_scratch_pad_3;   /* 00B8h */
 
-    u_int32_t reserved_4[2];            /* 00B8h */
+    u_int32_t reserved_4;               /* 00BCh */
 
     u_int32_t inbound_low_queue_port;   /* 00C0h */
 
@@ -2919,6 +3004,7 @@ struct mrsas_softc {
 
     boolean_t is_ventura;
     boolean_t msix_combined;
+    u_int16_t maxRaidMapSize;
 
     /* Non dma-able memory. Driver local copy. */
     MR_DRV_RAID_MAP_ALL *ld_drv_map[2];

Modified: head/sys/dev/mrsas/mrsas_fp.c
==============================================================================
--- head/sys/dev/mrsas/mrsas_fp.c       Fri Dec 14 07:57:00 2018        (r342059)
+++ head/sys/dev/mrsas/mrsas_fp.c       Fri Dec 14 07:59:09 2018        (r342060)
@@ -67,7 +67,7 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
     u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
     RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
-u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
+u_int8_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
 u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
 u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
 u_int16_t
@@ -103,7 +103,7 @@ static MR_SPAN_BLOCK_INFO *
 MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
 MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
-void MR_PopulateDrvRaidMap(struct mrsas_softc *sc);
+static int MR_PopulateDrvRaidMap(struct mrsas_softc *sc);
 
 
 /*
@@ -237,7 +237,7 @@ MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * m
     return &map->raidMap.ldSpanMap[ld].spanBlock[0];
 }
 
-u_int16_t
+u_int8_t
 MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
 {
     return map->raidMap.ldTgtIdToLd[ldTgtId];
 }
@@ -266,26 +266,203 @@ MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_A
 }
 
 /*
+ * This function will Populate Driver Map using Dynamic firmware raid map
+ */
+static int
+MR_PopulateDrvRaidMapVentura(struct mrsas_softc *sc)
+{
+    unsigned int i, j;
+    u_int16_t ld_count;
+
+    MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+    MR_RAID_MAP_DESC_TABLE *desc_table;
+    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+    void *raid_map_data = NULL;
+
+    fw_map_dyn = (MR_FW_RAID_MAP_DYNAMIC *) sc->raidmap_mem[(sc->map_id & 1)];
+
+    if (fw_map_dyn == NULL) {
+        device_printf(sc->mrsas_dev,
+            "from %s %d map0 %p map1 %p map size %d \n", __func__, __LINE__,
+            sc->raidmap_mem[0], sc->raidmap_mem[1], sc->maxRaidMapSize);
+        return 1;
+    }
+#if VD_EXT_DEBUG
+    device_printf(sc->mrsas_dev,
+        " raidMapSize 0x%x, descTableOffset 0x%x, "
+        " descTableSize 0x%x, descTableNumElements 0x%x \n",
+        fw_map_dyn->raidMapSize, fw_map_dyn->descTableOffset,
+        fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements);
+#endif
+    desc_table = (MR_RAID_MAP_DESC_TABLE *) ((char *)fw_map_dyn +
+        fw_map_dyn->descTableOffset);
+    if (desc_table != fw_map_dyn->raidMapDescTable) {
+        device_printf(sc->mrsas_dev,
+            "offsets of desc table are not matching returning "
+            " FW raid map has been changed: desc %p original %p\n",
+            desc_table, fw_map_dyn->raidMapDescTable);
+    }
+    memset(drv_map, 0, sc->drv_map_sz);
+    ld_count = fw_map_dyn->ldCount;
+    pDrvRaidMap->ldCount = ld_count;
+    pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec;
+    pDrvRaidMap->totalSize = sizeof(MR_DRV_RAID_MAP_ALL);
+    /* point to actual data starting point */
+    raid_map_data = (char *)fw_map_dyn +
+        fw_map_dyn->descTableOffset + fw_map_dyn->descTableSize;
+
+    for (i = 0; i < fw_map_dyn->descTableNumElements; ++i) {
+        if (!desc_table) {
+            device_printf(sc->mrsas_dev,
+                "desc table is null, coming out %p \n", desc_table);
+            return 1;
+        }
+#if VD_EXT_DEBUG
+        device_printf(sc->mrsas_dev, "raid_map_data %p \n", raid_map_data);
+        device_printf(sc->mrsas_dev,
+            "desc table %p \n", desc_table);
+        device_printf(sc->mrsas_dev,
+            "raidmap type %d, raidmapOffset 0x%x, "
+            " raid map number of elements 0%x, raidmapsize 0x%x\n",
+            desc_table->raidMapDescType, desc_table->raidMapDescOffset,
+            desc_table->raidMapDescElements, desc_table->raidMapDescBufferSize);
+#endif
+        switch (desc_table->raidMapDescType) {
+        case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+            fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *)
+                ((char *)raid_map_data + desc_table->raidMapDescOffset);
+#if VD_EXT_DEBUG
+            device_printf(sc->mrsas_dev,
+                "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo);
+#endif
+            memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo,
+                sizeof(MR_DEV_HANDLE_INFO) * desc_table->raidMapDescElements);
+            break;
+        case RAID_MAP_DESC_TYPE_TGTID_INFO:
+            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *)
+                ((char *)raid_map_data + desc_table->raidMapDescOffset);
+#if VD_EXT_DEBUG
+            device_printf(sc->mrsas_dev,
+                "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd);
+#endif
+            for (j = 0; j < desc_table->raidMapDescElements; j++) {
+                pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j];
+#if VD_EXT_DEBUG
+                device_printf(sc->mrsas_dev,
+                    " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]);
+#endif
+            }
+            break;
+        case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+            fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *) ((char *)raid_map_data +
+                desc_table->raidMapDescOffset);
+#if VD_EXT_DEBUG
+            device_printf(sc->mrsas_dev,
+                "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo);
+#endif
+            memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo,
+                sizeof(MR_ARRAY_INFO) * desc_table->raidMapDescElements);
+            break;
+        case RAID_MAP_DESC_TYPE_SPAN_INFO:
+            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *) ((char *)raid_map_data +
+                desc_table->raidMapDescOffset);
+            memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap,
+                sizeof(MR_LD_SPAN_MAP) * desc_table->raidMapDescElements);
+#if VD_EXT_DEBUG
+            device_printf(sc->mrsas_dev,
+                "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap);
+            device_printf(sc->mrsas_dev,
+                "MR_LD_SPAN_MAP size 0x%lx\n", sizeof(MR_LD_SPAN_MAP));
+            for (j = 0; j < ld_count; j++) {
+                printf("mrsas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x "
+                    "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
+                    j, j, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.targetId, j,
+                    fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.seqNum,
+                    (u_int32_t)fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.rowSize);
+                printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+                    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
+                    j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
+                    pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
+                    (u_int32_t)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
+                printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
+                    drv_map, pDrvRaidMap, &fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid,
+                    &pDrvRaidMap->ldSpanMap[j].ldRaid);
+            }
+#endif
+            break;
+        default:
+            device_printf(sc->mrsas_dev,
+                "wrong number of desctableElements %d\n",
+                fw_map_dyn->descTableNumElements);
+        }
+        ++desc_table;
+    }
+    return 0;
+}
+
+/*
  * This function will Populate Driver Map using firmware raid map
  */
-void
+static int
 MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
 {
     MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+    MR_FW_RAID_MAP_EXT *fw_map_ext;
     MR_FW_RAID_MAP *pFwRaidMap = NULL;
     unsigned int i;
+    u_int16_t ld_count;
 
     MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
     MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
 
-    if (sc->max256vdSupport) {
-        memcpy(sc->ld_drv_map[sc->map_id & 1],
-            sc->raidmap_mem[sc->map_id & 1],
-            sc->current_map_sz);
-        /*
-         * New Raid map will not set totalSize, so keep expected
-         * value for legacy code in ValidateMapInfo
-         */
+    if (sc->maxRaidMapSize) {
+        return MR_PopulateDrvRaidMapVentura(sc);
+    } else if (sc->max256vdSupport) {
+        fw_map_ext = (MR_FW_RAID_MAP_EXT *) sc->raidmap_mem[(sc->map_id & 1)];
+        ld_count = (u_int16_t)(fw_map_ext->ldCount);
+        if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+            device_printf(sc->mrsas_dev,
+                "mrsas: LD count exposed in RAID map in not valid\n");
+            return 1;
+        }
+#if VD_EXT_DEBUG
+        for (i = 0; i < ld_count; i++) {
+            printf("mrsas : Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n",
+                i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
+                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
+                fw_map_ext->ldSpanMap[i].ldRaid.size);
+        }
+#endif
+        memset(drv_map, 0, sc->drv_map_sz);
+        pDrvRaidMap->ldCount = ld_count;
+        pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+        for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) {
+            pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i];
+        }
+        memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, sizeof(MR_LD_SPAN_MAP) * ld_count);
+#if VD_EXT_DEBUG
+        for (i = 0; i < ld_count; i++) {
+            printf("mrsas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x "
+                "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
+                i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
+                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
+                (u_int32_t)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
+            printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
+                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
+                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
+                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+                (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+            printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
+                drv_map, pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
+                &pDrvRaidMap->ldSpanMap[i].ldRaid);
+        }
+#endif
+        memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+            sizeof(MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+        memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+            sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);
+        pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
     } else {
         fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
@@ -339,6 +516,7 @@ MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
             sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);
     }
 
+    return 0;
 }
 
 /*
@@ -354,7 +532,8 @@ MR_ValidateMapInfo(struct mrsas_softc *sc)
     if (!sc) {
         return 1;
     }
-    MR_PopulateDrvRaidMap(sc);
+    if (MR_PopulateDrvRaidMap(sc))
+        return 0;
 
     MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
     MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
@@ -365,7 +544,9 @@ MR_ValidateMapInfo(struct mrsas_softc *sc)
     pDrvRaidMap = &drv_map->raidMap;
     PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) & sc->log_to_span;
 
-    if (sc->max256vdSupport)
+    if (sc->maxRaidMapSize)
+        expected_map_size = sizeof(MR_DRV_RAID_MAP_ALL);
+    else if (sc->max256vdSupport)
         expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
     else
         expected_map_size =
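To make the map-size selection in mrsas_update_ext_vd_details() easier to follow, here is a minimal, self-contained C sketch of the same arithmetic. The three MR_* constants mirror the ones added to mrsas.h in this change; the sample scratch_pad_3 value, the main() harness and the printf output are illustrative assumptions only, not driver code.

/* Standalone illustration of the dynamic RAID map sizing; not part of the commit. */
#include <stdint.h>
#include <stdio.h>

/* Constants as added to mrsas.h in this change. */
#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
#define MR_MAX_RAID_MAP_SIZE_MASK         0x1FF
#define MR_MIN_MAP_SIZE                   0x10000 /* 64 KB unit */

int
main(void)
{
        /* Assumed example value read from outbound_scratch_pad_3. */
        uint32_t scratch_pad_3 = 0x00400000;

        /* Same field extraction that mrsas_init_fw() performs on Ventura parts. */
        uint32_t maxRaidMapSize = (scratch_pad_3 >>
            MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & MR_MAX_RAID_MAP_SIZE_MASK;

        if (maxRaidMapSize != 0) {
                /* Dynamic map: firmware reports the size in 64 KB units. */
                uint32_t ventura_map_sz = maxRaidMapSize * MR_MIN_MAP_SIZE;
                printf("dynamic map: current_map_sz = max_map_sz = 0x%x bytes\n",
                    ventura_map_sz);
        } else {
                /* Older firmware: fall back to the sizeof()-based sizing. */
                printf("legacy map sizing path (old_map_sz/new_map_sz)\n");
        }
        return (0);
}

With the assumed register value 0x00400000 the field decodes to 0x40, giving a 0x400000-byte (4 MB) RAID map buffer, which is the value the VD_EXT_DEBUG printout in the diff would report as current_map_sz.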