From owner-p4-projects@FreeBSD.ORG Mon Apr 30 04:43:57 2007
From: Matt Jacob <mjacob@freebsd.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 118969 for review
Date: Mon, 30 Apr 2007 04:43:56 GMT
Message-Id: <200704300443.l3U4huhw067704@repoman.freebsd.org>
List-Id: p4 projects tree changes

http://perforce.freebsd.org/chv.cgi?CH=118969

Change 118969 by mjacob@mjexp on 2007/04/30 04:42:54

	Borrow scott's graciously offered leg up on SMP for mpt.

Affected files ...

.. //depot/projects/mjexp/sys/dev/mpt/mpt.c#7 integrate
.. //depot/projects/mjexp/sys/dev/mpt/mpt.h#11 integrate
.. //depot/projects/mjexp/sys/dev/mpt/mpt_cam.c#19 integrate
.. //depot/projects/mjexp/sys/dev/mpt/mpt_pci.c#11 integrate
.. //depot/projects/mjexp/sys/dev/mpt/mpt_raid.c#5 integrate

Differences ...
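The diffs that follow are the core of a conversion of mpt(4) from Giant-based serialization to a per-softc mutex: MPT_LOCK()/MPT_UNLOCK()/MPT_LOCK_ASSERT() become mtx(9) operations on mpt->mpt_lock, a per-request callout(9) timer replaces timeout()/untimeout() on the CCB, the busdma lock mutex and the CAM SIMs switch from &Giant to &mpt->mpt_lock, and an msi_enable flag is added. Purely as a condensed, illustrative sketch of that pattern (the "example" softc, request and function names below are hypothetical, not the driver's code; only the mtx(9)/callout(9) KPI calls and the macro names in the comments mirror what the change actually uses):

/*
 * Minimal sketch, assuming hypothetical "example" names; the KPI calls
 * are the standard FreeBSD ones the diff relies on.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct example_request {
	struct callout callout;		/* replaces timeout_ch on the CCB */
};

struct example_softc {
	struct mtx lock;		/* replaces Giant for this instance */
	struct example_request req;
};

static void
example_timeout(void *arg)
{
	struct example_softc *sc = arg;

	/* An MPSAFE callout runs without Giant, so take the softc lock. */
	mtx_lock(&sc->lock);
	/* ... flag the request as timed out, wake the recovery thread ... */
	mtx_unlock(&sc->lock);
}

static void
example_start(struct example_softc *sc)
{
	mtx_init(&sc->lock, "example", NULL, MTX_DEF);	/* MPT_LOCK_SETUP() */
	callout_init(&sc->req.callout, /*mpsafe*/ 1);	/* mpt_req_timeout_init() */

	mtx_lock(&sc->lock);				/* MPT_LOCK() */
	mtx_assert(&sc->lock, MA_OWNED);		/* MPT_LOCK_ASSERT() */
	/* Arm a 60 second timer, as mpt_req_timeout() does for a request. */
	callout_reset(&sc->req.callout, 60 * hz, example_timeout, sc);
	mtx_unlock(&sc->lock);				/* MPT_UNLOCK() */
}

static void
example_stop(struct example_softc *sc)
{
	callout_stop(&sc->req.callout);			/* mpt_req_untimeout() */
	mtx_destroy(&sc->lock);				/* MPT_LOCK_DESTROY() */
}

In the MPSAFE configuration both MPTLOCK_2_CAMLOCK() and CAMLOCK_2_MPTLOCK() expand to nothing, since the SIMs are now registered with &mpt->mpt_lock; the calls stay sprinkled through mpt_cam.c so that configurations where those macros expand to real lock transitions continue to work.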
==== //depot/projects/mjexp/sys/dev/mpt/mpt.c#7 (text+ko) ====

@@ -704,6 +704,8 @@
 	mpt = (struct mpt_softc *)arg;
 	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
+	MPT_LOCK_ASSERT(mpt);
+
 	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
 		request_t *req;
 		MSG_DEFAULT_REPLY *reply_frame;
@@ -1171,6 +1173,7 @@
 	}
 	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
 	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
+	MPT_LOCK_ASSERT(mpt);
 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
 	    ("mpt_free_request: req %p:%u func %x already on freelist",
 	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
@@ -1219,6 +1222,7 @@
 	request_t *req;

 retry:
+	MPT_LOCK_ASSERT(mpt);
 	req = TAILQ_FIRST(&mpt->request_free_list);
 	if (req != NULL) {
 		KASSERT(req == &mpt->request_pool[req->index],
@@ -2105,18 +2109,20 @@
 int
 mpt_core_attach(struct mpt_softc *mpt)
 {
-	int val;
+	int val, error;

 	LIST_INIT(&mpt->ack_frames);
 	/* Put all request buffers on the free list */
 	TAILQ_INIT(&mpt->request_pending_list);
 	TAILQ_INIT(&mpt->request_free_list);
 	TAILQ_INIT(&mpt->request_timeout_list);
+	MPT_LOCK(mpt);
 	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 		request_t *req = &mpt->request_pool[val];
 		req->state = REQ_STATE_ALLOCATED;
 		mpt_free_request(mpt, req);
 	}
+	MPT_UNLOCK(mpt);
 	for (val = 0; val < MPT_MAX_LUNS; val++) {
 		STAILQ_INIT(&mpt->trt[val].atios);
 		STAILQ_INIT(&mpt->trt[val].inots);
@@ -2130,7 +2136,12 @@
 	mpt_sysctl_attach(mpt);
 	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
 	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
-	return (mpt_configure_ioc(mpt, 0, 0));
+
+	MPT_LOCK(mpt);
+	error = mpt_configure_ioc(mpt, 0, 0);
+	MPT_UNLOCK(mpt);
+
+	return (error);
 }

 int
@@ -2141,6 +2152,7 @@
 	 * not enabled, ports not enabled and interrupts
 	 * not enabled.
 	 */
+	MPT_LOCK(mpt);

 	/*
 	 * Enable asynchronous event reporting- all personalities
@@ -2175,8 +2187,10 @@
 	 */
 	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
 		mpt_prt(mpt, "failed to enable port 0\n");
+		MPT_UNLOCK(mpt);
 		return (ENXIO);
 	}
+	MPT_UNLOCK(mpt);
 	return (0);
 }

==== //depot/projects/mjexp/sys/dev/mpt/mpt.h#11 (text+ko) ====

@@ -241,7 +241,7 @@
 	bus_dma_tag_create(parent_tag, alignment, boundary,	\
 	    lowaddr, highaddr, filter, filterarg,		\
 	    maxsize, nsegments, maxsegsz, flags,		\
-	    busdma_lock_mutex, &Giant,				\
+	    busdma_lock_mutex, &(mpt)->mpt_lock,		\
 	    dma_tagp)
 #else
 #define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \
@@ -280,7 +280,7 @@

 /****************************** Timer Facilities ******************************/
 #if __FreeBSD_version > 500000
-#define	mpt_callout_init(c)	callout_init(c, /*mpsafe*/0);
+#define	mpt_callout_init(c)	callout_init(c, /*mpsafe*/1);
 #else
 #define	mpt_callout_init(c)	callout_init(c);
 #endif
@@ -337,6 +337,7 @@
 	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
 	bus_dmamap_t	dmap;		/* DMA map for data buffers */
 	struct req_entry *chain;	/* for SGE overallocations */
+	struct callout	callout;	/* Timeout for the request */
 };

 /**************************** MPI Target State Info ***************************/
@@ -527,7 +528,7 @@
 		unit		: 8,
 		ready		: 1,
 		fw_uploaded	: 1,
-				: 1,
+		msi_enable	: 1,
 		twildcard	: 1,
 		tenabled	: 1,
 		do_cfg_role	: 1,
@@ -740,6 +741,10 @@
 #define	MPT_IFLAGS		INTR_TYPE_CAM
 #define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
 #define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
+#define	MPT_OWNED(mpt)		mpt->mpt_islocked
+#define	MPT_LOCK_ASSERT(mpt)
+#define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
+#define	CAMLOCK_2_MPTLOCK	MPT_LOCK
 #define	MPT_LOCK_SETUP(mpt)
 #define	MPT_LOCK_DESTROY(mpt)

@@ -791,15 +796,55 @@
 }

 #else
+#if 1
+#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
+#define	MPT_LOCK_SETUP(mpt)						\
+		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
+		mpt->mpt_locksetup = 1
+#define	MPT_LOCK_DESTROY(mpt)						\
+	if (mpt->mpt_locksetup) {					\
+		mtx_destroy(&mpt->mpt_lock);				\
+		mpt->mpt_locksetup = 0;					\
+	}
+
+#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
+#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
+#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
+#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
+#define	MPTLOCK_2_CAMLOCK(mpt)
+#define	CAMLOCK_2_MPTLOCK(mpt)
+#define	mpt_sleep(mpt, ident, priority, wmesg, timo)			\
+	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
+#define	mpt_req_timeout(req, ticks, func, arg)				\
+	callout_reset(&(req)->callout, (ticks), (func), (arg));
+#define	mpt_req_untimeout(req, func, arg)				\
+	callout_stop(&(req)->callout)
+#define	mpt_req_timeout_init(req)					\
+	callout_init(&(req)->callout, 1)
+
+#else
+
 #define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
 #define	MPT_LOCK_SETUP(mpt)	do { } while (0)
 #define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
-#define	MPT_LOCK(mpt)		do { } while (0)
-#define	MPT_UNLOCK(mpt)		do { } while (0)
+#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&Giant, MA_OWNED)
+#define	MPT_LOCK(mpt)		mtx_lock(&Giant)
+#define	MPT_UNLOCK(mpt)		mtx_unlock(&Giant)
+#define	MPTLOCK_2_CAMLOCK(mpt)
+#define	CAMLOCK_2_MPTLOCK(mpt)

 static __inline int mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
+#define	mpt_ccb_timeout(ccb, ticks, func, arg)				\
+	do {								\
+		(ccb)->ccb_h.timeout_ch = timeout((func), (arg), (ticks)); \
+	} while (0)
+#define	mpt_ccb_untimeout(ccb, func, arg)				\
+	untimeout((func), (arg), (ccb)->ccb_h.timeout_ch)
+#define	mpt_ccb_timeout_init(ccb)					\
callout_handle_init(&(ccb)->ccb_h.timeout_ch) + static __inline int mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t) { @@ -808,6 +853,7 @@ return (r); } #endif +#endif /******************************* Register Access ******************************/ static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t); ==== //depot/projects/mjexp/sys/dev/mpt/mpt_cam.c#19 (text+ko) ==== @@ -217,6 +217,7 @@ int maxq; int error; + MPT_LOCK(mpt); TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); @@ -225,14 +226,16 @@ error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } /* @@ -244,11 +247,13 @@ error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } maxq -= mpt->els_cmds_allocated; } @@ -263,7 +268,8 @@ error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } } @@ -274,7 +280,8 @@ if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; - goto cleanup0; + MPT_UNLOCK(mpt); + goto cleanup; } /* @@ -286,17 +293,18 @@ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; + /* + * The rest of this is CAM foo, for which we need to drop our lock + */ + MPT_UNLOCK(mpt); + if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; - goto cleanup0; + goto cleanup; } /* - * The rest of this is CAM foo, for which we need to drop our lock - */ - - /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); @@ -310,7 +318,7 @@ * Construct our SIM entry. */ mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, - mpt->unit, &Giant, 1, maxq, devq); + mpt->unit, &mpt->mpt_lock, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); @@ -321,9 +329,11 @@ /* * Register exactly this bus. */ + MPT_LOCK(mpt); if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; + MPT_UNLOCK(mpt); goto cleanup; } @@ -331,8 +341,10 @@ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; + MPT_UNLOCK(mpt); goto cleanup; } + MPT_UNLOCK(mpt); /* * Only register a second bus for RAID physical @@ -346,7 +358,7 @@ * Create a "bus" to export all hidden disks to CAM. */ mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, - mpt->unit, &Giant, 1, maxq, devq); + mpt->unit, &mpt->mpt_lock, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; @@ -356,9 +368,11 @@ /* * Register this bus. 
*/ + MPT_LOCK(mpt); if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; + MPT_UNLOCK(mpt); goto cleanup; } @@ -367,13 +381,14 @@ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; + MPT_UNLOCK(mpt); goto cleanup; } + MPT_UNLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); return (0); cleanup: -cleanup0: mpt_cam_detach(mpt); return (error); } @@ -796,29 +811,38 @@ int mpt_cam_enable(struct mpt_softc *mpt) { + int error; + + MPT_LOCK(mpt); + + error = EIO; if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { - return (EIO); + goto out; } if (mpt_set_initial_config_fc(mpt)) { - return (EIO); + goto out; } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { - return (EIO); + goto out; } if (mpt_set_initial_config_sas(mpt)) { - return (EIO); + goto out; } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { - return (EIO); + goto out; } if (mpt_set_initial_config_spi(mpt)) { - return (EIO); + goto out; } } - return (0); + error = 0; + +out: + MPT_UNLOCK(mpt); + return (error); } void @@ -832,9 +856,11 @@ /* * Try to add some target command resources */ + MPT_LOCK(mpt); if (mpt_add_target_commands(mpt) == FALSE) { mpt_prt(mpt, "failed to add target commands\n"); } + MPT_UNLOCK(mpt); } mpt->ready = 1; } @@ -844,6 +870,7 @@ { mpt_handler_t handler; + MPT_LOCK(mpt); mpt->ready = 0; mpt_terminate_recovery_thread(mpt); @@ -865,6 +892,7 @@ mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } + MPT_UNLOCK(mpt); if (mpt->sim != NULL) { xpt_free_path(mpt->path); @@ -905,6 +933,7 @@ ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; + MPT_LOCK(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); @@ -915,6 +944,7 @@ req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } + MPT_UNLOCK(mpt); } /* @@ -1010,7 +1040,9 @@ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); return; } @@ -1233,7 +1265,9 @@ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; + CAMLOCK_2_MPTLOCK(mpt); nrq = mpt_get_request(mpt, FALSE); + MPTLOCK_2_CAMLOCK(mpt); if (nrq == NULL) { error = ENOMEM; @@ -1281,17 +1315,18 @@ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { - ccb->ccb_h.timeout_ch = - timeout(mpt_timeout, (caddr_t)ccb, - (ccb->ccb_h.timeout * hz) / 1000); + mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, + mpt_timeout, ccb); } else { - callout_handle_init(&ccb->ccb_h.timeout_ch); + mpt_req_timeout_init(req); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; @@ -1316,7 +1351,9 @@ tgt->state = TGT_STATE_MOVING_DATA; #endif } + CAMLOCK_2_MPTLOCK(mpt); mpt_send_cmd(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); } static void @@ -1405,7 +1442,9 @@ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); return; } @@ -1627,7 +1666,9 @@ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; + 
CAMLOCK_2_MPTLOCK(mpt); nrq = mpt_get_request(mpt, FALSE); + MPTLOCK_2_CAMLOCK(mpt); if (nrq == NULL) { error = ENOMEM; @@ -1675,17 +1716,18 @@ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { - ccb->ccb_h.timeout_ch = - timeout(mpt_timeout, (caddr_t)ccb, - (ccb->ccb_h.timeout * hz) / 1000); + mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, + mpt_timeout, ccb); } else { - callout_handle_init(&ccb->ccb_h.timeout_ch); + mpt_req_timeout_init(req); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; @@ -1710,7 +1752,9 @@ tgt->state = TGT_STATE_MOVING_DATA; #endif } + CAMLOCK_2_MPTLOCK(mpt); mpt_send_cmd(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); } static void @@ -1729,6 +1773,7 @@ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); + CAMLOCK_2_MPTLOCK(mpt); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; @@ -1737,12 +1782,14 @@ } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif + MPTLOCK_2_CAMLOCK(mpt); if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; @@ -1764,12 +1811,15 @@ mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + CAMLOCK_2_MPTLOCK(mpt); if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { + MPTLOCK_2_CAMLOCK(mpt); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } + MPTLOCK_2_CAMLOCK(mpt); mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; @@ -2063,6 +2113,7 @@ } else { pathid = cam_sim_path(mpt->sim); } + MPTLOCK_2_CAMLOCK(mpt); /* * Allocate a CCB, create a wildcard path for this bus, * and schedule a rescan. 
@@ -2070,16 +2121,19 @@ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); + CAMLOCK_2_MPTLOCK(mpt); break; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + CAMLOCK_2_MPTLOCK(mpt); mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); + CAMLOCK_2_MPTLOCK(mpt); break; } #else @@ -2174,11 +2228,13 @@ } else { sim = mpt->sim; } + MPTLOCK_2_CAMLOCK(mpt); for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), pqf->TargetID, lun_id) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create a path to send " "XPT_REL_SIMQ"); + CAMLOCK_2_MPTLOCK(mpt); break; } xpt_setup_ccb(&crs.ccb_h, tmppath, 5); @@ -2191,6 +2247,7 @@ } xpt_free_path(tmppath); } + CAMLOCK_2_MPTLOCK(mpt); break; } case MPI_EVENT_EVENT_CHANGE: @@ -2238,7 +2295,7 @@ } tgt = scsi_req->TargetID; - untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); + mpt_req_untimeout(req, mpt_timeout, ccb); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { @@ -2286,7 +2343,9 @@ req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { @@ -2860,6 +2919,7 @@ mpt = (struct mpt_softc *)cam_sim_softc(sim); raid_passthru = (sim == mpt->phydisk_sim); + MPT_LOCK_ASSERT(mpt); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; @@ -2867,12 +2927,15 @@ ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { + CAMLOCK_2_MPTLOCK(mpt); if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { + MPTLOCK_2_CAMLOCK(mpt); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } + MPTLOCK_2_CAMLOCK(mpt); } ccb->ccb_h.ccb_mpt_ptr = mpt; @@ -2921,7 +2984,9 @@ } else { xpt_print(ccb->ccb_h.path, "reset device\n"); } + CAMLOCK_2_MPTLOCK(mpt); (void) mpt_bus_reset(mpt, tgt, lun, FALSE); + MPTLOCK_2_CAMLOCK(mpt); /* * mpt_bus_reset is always successful in that it @@ -2935,6 +3000,7 @@ case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; + CAMLOCK_2_MPTLOCK(mpt); switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: @@ -2951,6 +3017,7 @@ ccb->ccb_h.status = CAM_REQ_INVALID; break; } + MPTLOCK_2_CAMLOCK(mpt); break; } @@ -3090,6 +3157,7 @@ period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; } #endif + CAMLOCK_2_MPTLOCK(mpt); if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { @@ -3107,6 +3175,7 @@ mpt_setsync(mpt, tgt, period, offset); } if (dval == 0) { + MPTLOCK_2_CAMLOCK(mpt); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } @@ -3118,6 +3187,7 @@ } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } + MPTLOCK_2_CAMLOCK(mpt); break; } case XPT_GET_TRAN_SETTINGS: @@ -3288,12 +3358,14 @@ { int result; + CAMLOCK_2_MPTLOCK(mpt); if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); + MPTLOCK_2_CAMLOCK(mpt); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { @@ -3323,6 +3395,7 @@ } else { trtp = &mpt->trt[lun]; } + CAMLOCK_2_MPTLOCK(mpt); if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, 
"Put FREE ATIO %p lun %d\n", ccb, lun); @@ -3337,10 +3410,13 @@ mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); + MPTLOCK_2_CAMLOCK(mpt); return; } case XPT_CONT_TARGET_IO: + CAMLOCK_2_MPTLOCK(mpt); mpt_target_start_io(mpt, ccb); + MPTLOCK_2_CAMLOCK(mpt); return; default: @@ -3384,13 +3460,16 @@ CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; + CAMLOCK_2_MPTLOCK(mpt); tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { + MPTLOCK_2_CAMLOCK(mpt); mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } + MPTLOCK_2_CAMLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, tmp.NegotiatedParameters, tmp.Information); @@ -3587,10 +3666,8 @@ { struct mpt_softc *mpt; -#if __FreeBSD_version >= 500000 - mtx_lock(&Giant); -#endif mpt = (struct mpt_softc *)arg; + MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { @@ -3604,9 +3681,7 @@ } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); -#if __FreeBSD_version >= 500000 - mtx_unlock(&Giant); -#endif + MPT_UNLOCK(mpt); kthread_exit(0); } @@ -4051,14 +4126,18 @@ xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); return; } @@ -4078,7 +4157,9 @@ } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; @@ -4152,6 +4233,7 @@ "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); + MPTLOCK_2_CAMLOCK(mpt); if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { int error; @@ -4191,6 +4273,7 @@ (*cb)(req, sgs, csio->sglist_cnt, 0); } } + CAMLOCK_2_MPTLOCK(mpt); } else { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; @@ -4207,7 +4290,9 @@ ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); return; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { @@ -4413,7 +4498,9 @@ if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); } else { mpt_prt(mpt, "could not allocate status request- dropping\n"); @@ -4534,7 +4621,7 @@ req->serno, tgt->resid); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; - ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz); + mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); } mpt_send_cmd(mpt, req); } @@ -4588,7 +4675,9 @@ } tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; + MPTLOCK_2_CAMLOCK(mpt); xpt_done((union ccb *)inot); + CAMLOCK_2_MPTLOCK(mpt); } static void @@ -4867,7 +4956,9 @@ itag, atiop->tag_id, tgt->reply_desc, tgt->resid); } + MPTLOCK_2_CAMLOCK(mpt); xpt_done((union ccb *)atiop); + CAMLOCK_2_MPTLOCK(mpt); } static void @@ -4945,7 +5036,7 @@ } tgt->ccb = NULL; 
tgt->nxfers++; - untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); + mpt_req_untimeout(req, mpt_timeout, ccb); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); @@ -4975,7 +5066,9 @@ mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); break; } /* @@ -5008,8 +5101,7 @@ TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } - untimeout(mpt_timeout, ccb, - ccb->ccb_h.timeout_ch); + mpt_req_untimeout(req, mpt_timeout, ccb); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } @@ -5059,7 +5151,9 @@ mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } + MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); } break; } ==== //depot/projects/mjexp/sys/dev/mpt/mpt_pci.c#11 (text+ko) ==== @@ -334,6 +334,7 @@ } mpt->do_cfg_role = 1; } + mpt->msi_enable = 0; } #else static void @@ -360,6 +361,11 @@ } tval = 0; + mpt->msi_enable = 0; + if (resource_int_value(device_get_name(mpt->dev), + device_get_unit(mpt->dev), "msi_enable", &tval) == 0 && tval == 1) { + mpt->msi_enable = 1; + } } #endif @@ -522,26 +528,26 @@ /* Get a handle to the interrupt */ iqd = 0; - - /* - * First try to alloc an MSI-X message. If that - * fails, then try to alloc an MSI message instead. - * Don't do this for U320 chips. - */ - if (mpt->is_spi == 0 && pci_msix_count(dev) == 1) { - mpt->pci_msi_count = 1; - if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) { - iqd = 1; - } else { - mpt->pci_msi_count = 0; + if (mpt->msi_enable) { + /* + * First try to alloc an MSI-X message. If that >>> TRUNCATED FOR MAIL (1000 lines) <<<
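The message is cut off above, so the remainder of the mpt_pci.c MSI changes (and anything after them) is not shown here, and nothing below reconstructs that missing text. Purely as a hedged illustration of the pattern the visible fragment begins, a hint-gated MSI/MSI-X allocation typically looks something like the following sketch; the device and helper names are hypothetical, while resource_int_value(9), pci_msix_count(9), pci_msi_count(9), pci_alloc_msix(9) and pci_alloc_msi(9) are the standard KPIs the fragment uses or implies:

/*
 * Sketch only: hint-gated MSI/MSI-X allocation, not the truncated diff text.
 * Honors hint.<driver>.<unit>.msi_enable and falls back to legacy INTx.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

static int
example_pick_intr_rid(device_t dev)
{
	int count, rid, tval;

	rid = 0;			/* rid 0 is the legacy INTx resource */
	tval = 0;
	(void) resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi_enable", &tval);
	if (tval != 1)
		return (rid);

	count = 1;
	if (pci_msix_count(dev) >= 1 && pci_alloc_msix(dev, &count) == 0)
		rid = 1;		/* first MSI-X message */
	else if (pci_msi_count(dev) >= 1 && pci_alloc_msi(dev, &count) == 0)
		rid = 1;		/* first MSI message */
	return (rid);
}

With something along those lines the behavior is opt-in from the loader, e.g. hint.mpt.0.msi_enable="1" in /boot/device.hints, which matches the resource_int_value() check added in the visible part of mpt_pci.c.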