Date:      Sun, 28 Oct 2012 10:57:24 +0000 (UTC)
From:      Xin LI <delphij@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r242216 - in stable/8: share/man/man4 sys/dev/hptiop
Message-ID:  <201210281057.q9SAvOXL089574@svn.freebsd.org>

Author: delphij
Date: Sun Oct 28 10:57:24 2012
New Revision: 242216
URL: http://svn.freebsd.org/changeset/base/242216

Log:
  MFC r242086:
  
  Update hptiop(4) to version 1.8, which adds support for the HighPoint
  RocketRAID 4500 series.
  
  Many thanks to HighPoint Technologies for their continued support
  of FreeBSD!
  
  Submitted by:	HighPoint Technologies

Modified:
  stable/8/share/man/man4/hptiop.4
  stable/8/sys/dev/hptiop/hptiop.c
  stable/8/sys/dev/hptiop/hptiop.h
Directory Properties:
  stable/8/share/man/man4/   (props changed)
  stable/8/sys/   (props changed)
  stable/8/sys/dev/   (props changed)
  stable/8/sys/dev/hptiop/   (props changed)

Modified: stable/8/share/man/man4/hptiop.4
==============================================================================
--- stable/8/share/man/man4/hptiop.4	Sun Oct 28 10:33:19 2012	(r242215)
+++ stable/8/share/man/man4/hptiop.4	Sun Oct 28 10:57:24 2012	(r242216)
@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd August 5, 2012
+.Dd October 25, 2012
 .Dt HPTIOP 4
 .Os
 .Sh NAME
@@ -58,6 +58,10 @@ driver supports the following SAS and SA
 .Pp
 .Bl -bullet -compact
 .It
+HighPoint RocketRAID 4522
+.It
+HighPoint RocketRAID 4520
+.It
 HighPoint RocketRAID 4322
 .It
 HighPoint RocketRAID 4321

Modified: stable/8/sys/dev/hptiop/hptiop.c
==============================================================================
--- stable/8/sys/dev/hptiop/hptiop.c	Sun Oct 28 10:33:19 2012	(r242215)
+++ stable/8/sys/dev/hptiop/hptiop.c	Sun Oct 28 10:57:24 2012	(r242216)
@@ -1,6 +1,6 @@
 /*
  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
- * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -89,8 +89,8 @@ __FBSDID("$FreeBSD$");
 
 #include <dev/hptiop/hptiop.h>
 
-static char driver_name[] = "hptiop";
-static char driver_version[] = "v1.3 (010208)";
+static const char driver_name[] = "hptiop";
+static const char driver_version[] = "v1.8";
 
 static devclass_t hptiop_devclass;
 
@@ -99,41 +99,62 @@ static int hptiop_send_sync_msg(struct h
 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
 							u_int32_t req);
 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
+static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
+							u_int32_t req);
 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
 				struct hpt_iop_ioctl_param *pParams);
 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
 				struct hpt_iop_ioctl_param *pParams);
+static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_ioctl_param *pParams);
 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
+static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_get_config *config);
 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_get_config *config);
+static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_request_get_config *config);
 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_set_config *config);
 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_set_config *config);
+static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_request_set_config *config);
 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
+static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
+static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
+static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_ioctl_command *req,
 				struct hpt_iop_ioctl_param *pParams);
+static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_request_ioctl_command *req,
+				struct hpt_iop_ioctl_param *pParams);
 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
 				struct hpt_iop_srb *srb,
 				bus_dma_segment_t *segs, int nsegs);
 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
 				struct hpt_iop_srb *srb,
 				bus_dma_segment_t *segs, int nsegs);
+static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_srb *srb,
+				bus_dma_segment_t *segs, int nsegs);
 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
+static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
+static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
+static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
 static int  hptiop_probe(device_t dev);
@@ -146,8 +167,7 @@ static void hptiop_async(void *callback_
 					struct cam_path *path, void *arg);
 static void hptiop_pci_intr(void *arg);
 static void hptiop_release_resource(struct hpt_iop_hba *hba);
-static int  hptiop_reset_adapter(struct hpt_iop_hba *hba);
-
+static void hptiop_reset_adapter(void *argv);
 static d_open_t hptiop_open;
 static d_close_t hptiop_close;
 static d_ioctl_t hptiop_ioctl;
@@ -193,6 +213,11 @@ static struct cdevsw hptiop_cdevsw = {
 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
 
+#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
+		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
+#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
+		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
+
 static int hptiop_open(ioctl_dev_t dev, int flags,
 					int devtype, ioctl_thread_t proc)
 {
@@ -294,6 +319,12 @@ static void hptiop_post_msg_mv(struct hp
 	BUS_SPACE_RD4_MV0(outbound_intmask);
 }
 
+static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
+{
+	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
+	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
+}
+
 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
 {
 	u_int32_t req=0;
@@ -323,6 +354,15 @@ static int hptiop_wait_ready_mv(struct h
 	return 0;
 }
 
+static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
+							u_int32_t millisec)
+{
+	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
+		return -1;
+
+	return 0;
+}
+
 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
 							u_int32_t index)
 {
@@ -618,6 +658,111 @@ scsi_done:
 	}
 }
 
+static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
+				u_int32_t _tag)
+{
+	u_int32_t req_type = _tag & 0xf;
+
+	struct hpt_iop_srb *srb;
+	struct hpt_iop_request_scsi_command *req;
+	union ccb *ccb;
+	u_int8_t *cdb;
+
+	switch (req_type) {
+	case IOP_REQUEST_TYPE_GET_CONFIG:
+	case IOP_REQUEST_TYPE_SET_CONFIG:
+		hba->config_done = 1;
+		break;
+
+	case IOP_REQUEST_TYPE_SCSI_COMMAND:
+		srb = hba->srb[(_tag >> 4) & 0xff];
+		req = (struct hpt_iop_request_scsi_command *)srb;
+
+		ccb = (union ccb *)srb->ccb;
+
+		untimeout(hptiop_reset_adapter, hba, ccb->ccb_h.timeout_ch);
+
+		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
+			cdb = ccb->csio.cdb_io.cdb_ptr;
+		else
+			cdb = ccb->csio.cdb_io.cdb_bytes;
+
+		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
+			ccb->ccb_h.status = CAM_REQ_CMP;
+			goto scsi_done;
+		}
+
+		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
+			req->header.result = IOP_RESULT_SUCCESS;
+
+		switch (req->header.result) {
+		case IOP_RESULT_SUCCESS:
+			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+			case CAM_DIR_IN:
+				bus_dmamap_sync(hba->io_dmat,
+						srb->dma_map, BUS_DMASYNC_POSTREAD);
+				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
+				break;
+			case CAM_DIR_OUT:
+				bus_dmamap_sync(hba->io_dmat,
+						srb->dma_map, BUS_DMASYNC_POSTWRITE);
+				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
+				break;
+			}
+			ccb->ccb_h.status = CAM_REQ_CMP;
+			break;
+		case IOP_RESULT_BAD_TARGET:
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			break;
+		case IOP_RESULT_BUSY:
+			ccb->ccb_h.status = CAM_BUSY;
+			break;
+		case IOP_RESULT_INVALID_REQUEST:
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			break;
+		case IOP_RESULT_FAIL:
+			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+			break;
+		case IOP_RESULT_RESET:
+			ccb->ccb_h.status = CAM_BUSY;
+			break;
+		case IOP_RESULT_CHECK_CONDITION:
+			memset(&ccb->csio.sense_data, 0,
+			       sizeof(ccb->csio.sense_data));
+			if (req->dataxfer_length < ccb->csio.sense_len)
+				ccb->csio.sense_resid = ccb->csio.sense_len -
+				req->dataxfer_length;
+			else
+				ccb->csio.sense_resid = 0;
+			memcpy(&ccb->csio.sense_data, &req->sg_list, 
+			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
+			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+			break;
+		default:
+			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+			break;
+		}
+scsi_done:
+		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
+		
+		hptiop_free_srb(hba, srb);
+		xpt_done(ccb);
+		break;
+	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
+		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
+			hba->config_done = 1;
+		else
+			hba->config_done = -1;
+		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
+		break;
+	default:
+		device_printf(hba->pcidev, "wrong callback type\n");
+		break;
+	}
+}
+
 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
 {
 	u_int64_t req;
@@ -656,6 +801,50 @@ static int hptiop_intr_mv(struct hpt_iop
 	return ret;
 }
 
+static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
+{
+	u_int32_t status, _tag, cptr;
+	int ret = 0;
+
+	if (hba->initialized) {
+		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
+	}
+
+	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
+	if (status) {
+		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
+		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
+			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
+			hptiop_os_message_callback(hba, msg);
+		}
+		ret = 1;
+	}
+
+	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
+	if (status) {
+		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
+		do {
+			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+			while (hba->u.mvfrey.outlist_rptr != cptr) {
+				hba->u.mvfrey.outlist_rptr++;
+				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
+					hba->u.mvfrey.outlist_rptr = 0;
+				}
+	
+				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
+				hptiop_request_callback_mvfrey(hba, _tag);
+				ret = 2;
+			}
+		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+	}
+
+	if (hba->initialized) {
+		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
+	}
+
+	return ret;
+}
+
 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
 					u_int32_t req32, u_int32_t millisec)
 {
@@ -702,6 +891,48 @@ static int hptiop_send_sync_request_mv(s
 	return -1;
 }
 
+static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
+					void *req, u_int32_t millisec)
+{
+	u_int32_t i, index;
+	u_int64_t phy_addr;
+	struct hpt_iop_request_header *reqhdr =
+										(struct hpt_iop_request_header *)req;
+	
+	hba->config_done = 0;
+
+	phy_addr = hba->ctlcfgcmd_phy;
+	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
+					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
+					| IOP_REQUEST_FLAG_ADDR_BITS
+					| ((phy_addr >> 16) & 0xffff0000);
+	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
+					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
+
+	hba->u.mvfrey.inlist_wptr++;
+	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+	if (index == hba->u.mvfrey.list_count) {
+		index = 0;
+		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+	}
+
+	hba->u.mvfrey.inlist[index].addr = phy_addr;
+	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+
+	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
+	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
+
+	for (i = 0; i < millisec; i++) {
+		hptiop_intr_mvfrey(hba);
+		if (hba->config_done)
+			return 0;
+		DELAY(1000);
+	}
+	return -1;
+}
+
 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
 					u_int32_t msg, u_int32_t millisec)
 {
@@ -776,6 +1007,37 @@ static int hptiop_get_config_mv(struct h
 	return 0;
 }
 
+static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
+				struct hpt_iop_request_get_config * config)
+{
+	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
+		KdPrint(("hptiop: header size %x/%x type %x/%x",
+			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
+			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
+		return -1;
+	}
+
+	config->interface_version = info->interface_version;
+	config->firmware_version = info->firmware_version;
+	config->max_requests = info->max_requests;
+	config->request_size = info->request_size;
+	config->max_sg_count = info->max_sg_count;
+	config->data_transfer_length = info->data_transfer_length;
+	config->alignment_mask = info->alignment_mask;
+	config->max_devices = info->max_devices;
+	config->sdram_size = info->sdram_size;
+
+	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
+		 config->max_requests, config->request_size,
+		 config->data_transfer_length, config->max_devices,
+		 config->sdram_size));
+
+	return 0;
+}
+
 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
 				struct hpt_iop_request_set_config *config)
 {
@@ -833,6 +1095,31 @@ static int hptiop_set_config_mv(struct h
 	return 0;
 }
 
+static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_request_set_config *config)
+{
+	struct hpt_iop_request_set_config *req;
+
+	if (!(req = hba->ctlcfg_ptr))
+		return -1;
+
+	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
+		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
+		sizeof(struct hpt_iop_request_set_config) -
+			sizeof(struct hpt_iop_request_header));
+
+	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
+	req->header.size = sizeof(struct hpt_iop_request_set_config);
+	req->header.result = IOP_RESULT_PENDING;
+
+	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
+		KdPrint(("hptiop: set config send cmd failed"));
+		return -1;
+	}
+
+	return 0;
+}
+
 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
 				u_int32_t req32,
 				struct hpt_iop_ioctl_param *pParams)
@@ -884,7 +1171,8 @@ static int hptiop_post_ioctl_command_itl
 	return 0;
 }
 
-static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
+static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
+									void *user, int size)
 {
 	unsigned char byte;
 	int i;
@@ -898,7 +1186,8 @@ static int hptiop_bus_space_copyin(struc
 	return 0;
 }
 
-static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
+static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
+									void *user, int size)
 {
 	unsigned char byte;
 	int i;
@@ -1045,6 +1334,103 @@ invalid:
 	}
 }
 
+static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_request_ioctl_command *req,
+				struct hpt_iop_ioctl_param *pParams)
+{
+	u_int64_t phy_addr;
+	u_int32_t index;
+
+	phy_addr = hba->ctlcfgcmd_phy;
+
+	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
+			(hba->max_request_size -
+			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
+		device_printf(hba->pcidev, "request size beyond max value");
+		return -1;
+	}
+
+	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
+	req->inbuf_size = pParams->nInBufferSize;
+	req->outbuf_size = pParams->nOutBufferSize;
+	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
+					+ pParams->nInBufferSize;
+
+	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
+	req->header.result = IOP_RESULT_PENDING;
+
+	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
+						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
+						| IOP_REQUEST_FLAG_ADDR_BITS
+						| ((phy_addr >> 16) & 0xffff0000);
+	req->header.context = ((phy_addr & 0xffffffff) << 32 )
+						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
+
+	hba->u.mvfrey.inlist_wptr++;
+	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+	if (index == hba->u.mvfrey.list_count) {
+		index = 0;
+		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+	}
+
+	hba->u.mvfrey.inlist[index].addr = phy_addr;
+	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
+
+	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
+	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
+
+	while (hba->config_done == 0) {
+		if (hptiop_sleep(hba, req, PPAUSE,
+			"hptctl", HPT_OSM_TIMEOUT)==0)
+			continue;
+		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
+	}
+	return 0;
+}
+
+static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_ioctl_param *pParams)
+{
+	struct hpt_iop_request_ioctl_command *req;
+
+	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
+		(pParams->Magic != HPT_IOCTL_MAGIC32))
+		return EFAULT;
+
+	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
+	hba->config_done = 0;
+	hptiop_lock_adapter(hba);
+	if (pParams->nInBufferSize)
+		if (copyin((void *)pParams->lpInBuffer,
+				req->buf, pParams->nInBufferSize))
+			goto invalid;
+	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
+		goto invalid;
+
+	if (hba->config_done == 1) {
+		if (pParams->nOutBufferSize)
+			if (copyout(req->buf +
+				((pParams->nInBufferSize + 3) & ~3),
+				(void *)pParams->lpOutBuffer,
+				pParams->nOutBufferSize))
+				goto invalid;
+
+		if (pParams->lpBytesReturned)
+			if (copyout(&req->bytes_returned,
+				(void*)pParams->lpBytesReturned,
+				sizeof(u_int32_t)))
+				goto invalid;
+		hptiop_unlock_adapter(hba);
+		return 0;
+	} else{
+invalid:
+		hptiop_unlock_adapter(hba);
+		return EFAULT;
+	}
+}
+
 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
 {
 	union ccb           *ccb;
@@ -1063,6 +1449,7 @@ static int  hptiop_rescan_bus(struct hpt
 static  bus_dmamap_callback_t   hptiop_map_srb;
 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
+static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
 
 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
 {
@@ -1139,6 +1526,56 @@ static int hptiop_alloc_pci_res_mv(struc
 	return 0;
 }
 
+static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
+{
+	hba->bar0_rid = 0x10;
+	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
+			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
+
+	if (hba->bar0_res == NULL) {
+		device_printf(hba->pcidev, "failed to get iop bar0.\n");
+		return -1;
+	}
+	hba->bar0t = rman_get_bustag(hba->bar0_res);
+	hba->bar0h = rman_get_bushandle(hba->bar0_res);
+	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
+				rman_get_virtual(hba->bar0_res);
+
+	if (!hba->u.mvfrey.config) {
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+					hba->bar0_rid, hba->bar0_res);
+		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
+		return -1;
+	}
+
+	hba->bar2_rid = 0x18;
+	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
+			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
+
+	if (hba->bar2_res == NULL) {
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+					hba->bar0_rid, hba->bar0_res);
+		device_printf(hba->pcidev, "failed to get iop bar2.\n");
+		return -1;
+	}
+
+	hba->bar2t = rman_get_bustag(hba->bar2_res);
+	hba->bar2h = rman_get_bushandle(hba->bar2_res);
+	hba->u.mvfrey.mu =
+					(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
+
+	if (!hba->u.mvfrey.mu) {
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+					hba->bar0_rid, hba->bar0_res);
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+					hba->bar2_rid, hba->bar2_res);
+		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
 {
 	if (hba->bar0_res)
@@ -1156,6 +1593,16 @@ static void hptiop_release_pci_res_mv(st
 			hba->bar2_rid, hba->bar2_res);
 }
 
+static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
+{
+	if (hba->bar0_res)
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+			hba->bar0_rid, hba->bar0_res);
+	if (hba->bar2_res)
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+			hba->bar2_rid, hba->bar2_res);
+}
+
 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
 {
 	if (bus_dma_tag_create(hba->parent_dmat,
@@ -1172,7 +1619,7 @@ static int hptiop_internal_memalloc_mv(s
 				NULL,
 				NULL,
 #endif
-                           		&hba->ctlcfg_dmat)) {
+				&hba->ctlcfg_dmat)) {
 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 		return -1;
 	}
@@ -1205,6 +1652,72 @@ static int hptiop_internal_memalloc_mv(s
 	return 0;
 }
 
+static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
+{
+	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
+
+	list_count >>= 16;
+
+	if (list_count == 0) {
+		return -1;
+	}
+
+	hba->u.mvfrey.list_count = list_count;
+	hba->u.mvfrey.internal_mem_size = 0x800
+							+ list_count * sizeof(struct mvfrey_inlist_entry)
+							+ list_count * sizeof(struct mvfrey_outlist_entry)
+							+ sizeof(int);
+	if (bus_dma_tag_create(hba->parent_dmat,
+				1,
+				0,
+				BUS_SPACE_MAXADDR_32BIT,
+				BUS_SPACE_MAXADDR,
+				NULL, NULL,
+				hba->u.mvfrey.internal_mem_size,
+				1,
+				BUS_SPACE_MAXSIZE_32BIT,
+				BUS_DMA_ALLOCNOW,
+#if __FreeBSD_version > 502000
+				NULL,
+				NULL,
+#endif
+				&hba->ctlcfg_dmat)) {
+		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
+		return -1;
+	}
+
+	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
+#if __FreeBSD_version>501000
+		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
+#else
+		BUS_DMA_WAITOK,
+#endif
+		&hba->ctlcfg_dmamap) != 0) {
+			device_printf(hba->pcidev,
+					"bus_dmamem_alloc failed!\n");
+			bus_dma_tag_destroy(hba->ctlcfg_dmat);
+			return -1;
+	}
+
+	if (bus_dmamap_load(hba->ctlcfg_dmat,
+			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
+			hba->u.mvfrey.internal_mem_size,
+			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
+		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
+		if (hba->ctlcfg_dmat)
+			bus_dmamem_free(hba->ctlcfg_dmat,
+				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
+			bus_dma_tag_destroy(hba->ctlcfg_dmat);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
+	return 0;
+}
+
 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
 {
 	if (hba->ctlcfg_dmat) {
@@ -1217,6 +1730,54 @@ static int hptiop_internal_memfree_mv(st
 	return 0;
 }
 
+static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
+{
+	if (hba->ctlcfg_dmat) {
+		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
+		bus_dmamem_free(hba->ctlcfg_dmat,
+					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
+		bus_dma_tag_destroy(hba->ctlcfg_dmat);
+	}
+
+	return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
+{
+	u_int32_t i = 100;
+
+	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+		return -1;
+
+	/* wait 100ms for MCU ready */
+	while(i--) {
+		DELAY(1000);
+	}
+
+	BUS_SPACE_WRT4_MVFREY2(inbound_base,
+							hba->u.mvfrey.inlist_phy & 0xffffffff);
+	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
+							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
+
+	BUS_SPACE_WRT4_MVFREY2(outbound_base,
+							hba->u.mvfrey.outlist_phy & 0xffffffff);
+	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
+							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
+
+	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
+							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
+	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
+							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
+
+	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
+								| CL_POINTER_TOGGLE;
+	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
+								| CL_POINTER_TOGGLE;
+	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
+	
+	return 0;
+}
+
 /*
  * CAM driver interface
  */
@@ -1230,9 +1791,10 @@ static device_method_t driver_methods[] 
 };
 
 static struct hptiop_adapter_ops hptiop_itl_ops = {
+	.family	           = INTEL_BASED_IOP,
 	.iop_wait_ready    = hptiop_wait_ready_itl,
 	.internal_memalloc = 0,
-	.internal_memfree  = 0,
+	.internal_memfree  = hptiop_internal_memfree_itl,
 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
 	.release_pci_res   = hptiop_release_pci_res_itl,
 	.enable_intr       = hptiop_enable_intr_itl,
@@ -1243,9 +1805,11 @@ static struct hptiop_adapter_ops hptiop_
 	.post_msg          = hptiop_post_msg_itl,
 	.post_req          = hptiop_post_req_itl,
 	.do_ioctl          = hptiop_do_ioctl_itl,
+	.reset_comm        = 0,
 };
 
 static struct hptiop_adapter_ops hptiop_mv_ops = {
+	.family	           = MV_BASED_IOP,
 	.iop_wait_ready    = hptiop_wait_ready_mv,
 	.internal_memalloc = hptiop_internal_memalloc_mv,
 	.internal_memfree  = hptiop_internal_memfree_mv,
@@ -1259,6 +1823,25 @@ static struct hptiop_adapter_ops hptiop_
 	.post_msg          = hptiop_post_msg_mv,
 	.post_req          = hptiop_post_req_mv,
 	.do_ioctl          = hptiop_do_ioctl_mv,
+	.reset_comm        = 0,
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+	.family	           = MVFREY_BASED_IOP,
+	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
+	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
+	.internal_memfree  = hptiop_internal_memfree_mvfrey,
+	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
+	.release_pci_res   = hptiop_release_pci_res_mvfrey,
+	.enable_intr       = hptiop_enable_intr_mvfrey,
+	.disable_intr      = hptiop_disable_intr_mvfrey,
+	.get_config        = hptiop_get_config_mvfrey,
+	.set_config        = hptiop_set_config_mvfrey,
+	.iop_intr          = hptiop_intr_mvfrey,
+	.post_msg          = hptiop_post_msg_mvfrey,
+	.post_req          = hptiop_post_req_mvfrey,
+	.do_ioctl          = hptiop_do_ioctl_mvfrey,
+	.reset_comm        = hptiop_reset_comm_mvfrey,
 };
 
 static driver_t hptiop_pci_driver = {
@@ -1284,6 +1867,11 @@ static int hptiop_probe(device_t dev)
 	id = pci_get_device(dev);
 
 	switch (id) {
+		case 0x4520:
+		case 0x4522:
+			sas = 1;
+			ops = &hptiop_mvfrey_ops;
+			break;
 		case 0x4210:
 		case 0x4211:
 		case 0x4310:
@@ -1385,7 +1973,7 @@ static int hptiop_attach(device_t dev)
 		goto release_pci_res;
 	}
 
-	if (hba->ops->internal_memalloc) {
+	if (hba->ops->family == MV_BASED_IOP) {
 		if (hba->ops->internal_memalloc(hba)) {
 			device_printf(dev, "alloc srb_dmat failed\n");
 			goto destroy_parent_tag;
@@ -1404,6 +1992,17 @@ static int hptiop_attach(device_t dev)
 	hba->max_request_size = iop_config.request_size;
 	hba->max_sg_count = iop_config.max_sg_count;
 
+	if (hba->ops->family == MVFREY_BASED_IOP) {
+		if (hba->ops->internal_memalloc(hba)) {
+			device_printf(dev, "alloc srb_dmat failed\n");
+			goto destroy_parent_tag;
+		}
+		if (hba->ops->reset_comm(hba)) {
+			device_printf(dev, "reset comm failed\n");
+			goto get_config_failed;
+		}
+	}
+
 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 			4,  /* alignment */
 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
@@ -1542,6 +2141,7 @@ static int hptiop_attach(device_t dev)
 	}
 
 	hba->ops->enable_intr(hba);
+	hba->initialized = 1;
 
 	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
 				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
@@ -1587,8 +2187,7 @@ destroy_io_dmat:
 		bus_dma_tag_destroy(hba->io_dmat);
 
 get_config_failed:
-	if (hba->ops->internal_memfree)
-		hba->ops->internal_memfree(hba);
+	hba->ops->internal_memfree(hba);
 
 destroy_parent_tag:
 	if (hba->parent_dmat)
@@ -1682,6 +2281,18 @@ static void hptiop_enable_intr_mv(struct
     	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
 }
 
+static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
+{
+	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
+	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
+
+	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
+	BUS_SPACE_RD4_MVFREY2(isr_enable);
+
+	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
+	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
+}
+
 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
 {
 	u_int32_t int_mask;
@@ -1704,9 +2315,24 @@ static void hptiop_disable_intr_mv(struc
 	BUS_SPACE_RD4_MV0(outbound_intmask);
 }
 
-static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
+static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
+{
+	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
+	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
+
+	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
+	BUS_SPACE_RD4_MVFREY2(isr_enable);
+
+	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
+	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
+}
+
+static void hptiop_reset_adapter(void *argv)
 {
-	return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
+	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
+	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
+		return;
+	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
 }
 
 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
@@ -1910,7 +2536,8 @@ static void hptiop_post_req_itl(struct h
 
 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
 
-		req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
+		req.header.size =
+				offsetof(struct hpt_iop_request_scsi_command, sg_list)
 				+ nsegs*sizeof(struct hpt_iopsg);
 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 		req.header.flags = 0;
@@ -1956,7 +2583,8 @@ static void hptiop_post_req_itl(struct h
 		req->channel =  0;
 		req->target =  ccb->ccb_h.target_id;
 		req->lun =  ccb->ccb_h.target_lun;
-		req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
+		req->header.size =
+			offsetof(struct hpt_iop_request_scsi_command, sg_list)
 			+ nsegs*sizeof(struct hpt_iopsg);
 		req->header.context = (u_int64_t)srb->index |
 						IOPMU_QUEUE_ADDR_HOST_BIT;
@@ -2044,6 +2672,78 @@ static void hptiop_post_req_mv(struct hp
 			| (size > 3 ? 3 : size), hba);
 }
 
+static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
+				struct hpt_iop_srb *srb,
+				bus_dma_segment_t *segs, int nsegs)
+{
+	int idx, index;
+	union ccb *ccb = srb->ccb;
+	u_int8_t *cdb;
+	struct hpt_iop_request_scsi_command *req;
+	u_int64_t req_phy;
+
+	req = (struct hpt_iop_request_scsi_command *)srb;
+	req_phy = srb->phy_addr;
+
+	if (ccb->csio.dxfer_len && nsegs > 0) {
+		struct hpt_iopsg *psg = req->sg_list;
+		for (idx = 0; idx < nsegs; idx++, psg++) {
+			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
+			psg->size = segs[idx].ds_len;
+			psg->eot = 0;
+		}
+		psg[-1].eot = 1;
+	}
+	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
+		cdb = ccb->csio.cdb_io.cdb_ptr;
+	else
+		cdb = ccb->csio.cdb_io.cdb_bytes;
+
+	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
+	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
+	req->header.result = IOP_RESULT_PENDING;
+	req->dataxfer_length = ccb->csio.dxfer_len;
+	req->channel = 0;
+	req->target = ccb->ccb_h.target_id;
+	req->lun = ccb->ccb_h.target_lun;
+	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
+				- sizeof(struct hpt_iopsg)
+				+ nsegs * sizeof(struct hpt_iopsg);
+	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+		bus_dmamap_sync(hba->io_dmat,
+			srb->dma_map, BUS_DMASYNC_PREREAD);
+	}

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


