From: Xin LI <delphij@FreeBSD.org>
Date: Wed, 9 Jun 2010 21:40:38 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r208969 - head/sys/dev/twa

Author: delphij
Date: Wed Jun  9 21:40:38 2010
New Revision: 208969

URL: http://svn.freebsd.org/changeset/base/208969

Log:
  Apply driver update from LSI.  Many thanks to LSI for continuing to
  support FreeBSD.

  1) Time out ioctl commands.  Do not reset the controller if an ioctl
     command completed successfully.
  2) Remove G66_WORKAROUND code (this bug never shipped).
  3) Remove the unnecessary interrupt lock (intr_lock).
  4) Time out the firmware handshake for PChip reset (don't wait forever).
  5) Handle interrupts inline.
  6) Unmask the command interrupt ONLY when adding a command to the
     pending queue.
  7) Mask the command interrupt ONLY after removing the last command from
     the pending queue.
  8) Remove TW_OSLI_DEFERRED_INTR_USED code.
  9) Replace the controller "state" bit-field with separate data fields to
     avoid races:
       TW_CLI_CTLR_STATE_ACTIVE                    ctlr->active
       TW_CLI_CTLR_STATE_INTR_ENABLED              ctlr->interrupts_enabled
       TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY         ctlr->internal_req_busy
       TW_CLI_CTLR_STATE_GET_MORE_AENS             ctlr->get_more_aens
       TW_CLI_CTLR_STATE_RESET_IN_PROGRESS         ctlr->reset_in_progress
       TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS  ctlr->reset_phase1_in_progress
  10) Fix a "req" leak in twa_action() when the simq is frozen and req is
      NOT NULL.
  11) Replace the softc "state" bit-field with separate data fields to
      avoid races:
       TW_OSLI_CTLR_STATE_OPEN         sc->open
       TW_OSLI_CTLR_STATE_SIMQ_FROZEN  sc->simq_frozen
  12) Fix the reference to TW_OSLI_REQ_FLAGS_IN_PROGRESS in
      tw_osl_complete_passthru().
  13) Use correct CAM status values.  Change CAM_REQ_CMP_ERR to
      CAM_REQ_INVALID.  Remove the use of CAM_RELEASE_SIMQ for physical
      data addresses.
  14) Do not freeze/release the simq with non-I/O commands.
      When it is appropriate to temporarily freeze the simq with an I/O
      command, use:
        xpt_freeze_simq(sim, 1);
        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
      otherwise use:
        xpt_freeze_simq(sim, 1);
        xpt_release_simq(sim, 1);
      (See the illustrative sketch after the diff output below.)

  Submitted by:	Tom Couch
  PR:		kern/147695
  MFC after:	3 days

Modified:
  head/sys/dev/twa/tw_cl.h
  head/sys/dev/twa/tw_cl_fwif.h
  head/sys/dev/twa/tw_cl_init.c
  head/sys/dev/twa/tw_cl_intr.c
  head/sys/dev/twa/tw_cl_io.c
  head/sys/dev/twa/tw_cl_misc.c
  head/sys/dev/twa/tw_cl_share.h
  head/sys/dev/twa/tw_osl.h
  head/sys/dev/twa/tw_osl_cam.c
  head/sys/dev/twa/tw_osl_externs.h
  head/sys/dev/twa/tw_osl_freebsd.c
  head/sys/dev/twa/tw_osl_inline.h
  head/sys/dev/twa/tw_osl_share.h

Modified: head/sys/dev/twa/tw_cl.h
==============================================================================
--- head/sys/dev/twa/tw_cl.h	Wed Jun  9 20:20:24 2010	(r208968)
+++ head/sys/dev/twa/tw_cl.h	Wed Jun  9 21:40:38 2010	(r208969)
@@ -51,22 +51,6 @@
 #define TW_CLI_RESET_TIMEOUT_PERIOD	60 /* seconds */
 #define TW_CLI_MAX_RESET_ATTEMPTS	2
 
-/* Possible values of ctlr->state. */
-/* Initialization done, and controller is active. */
-#define TW_CLI_CTLR_STATE_ACTIVE			(1<<0)
-/* Interrupts on controller enabled. */
-#define TW_CLI_CTLR_STATE_INTR_ENABLED			(1<<1)
-/* Data buffer for internal requests in use. */
-#define TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY		(1<<2)
-/* More AEN's need to be retrieved. */
-#define TW_CLI_CTLR_STATE_GET_MORE_AENS			(1<<3)
-/* Controller is being reset. */
-#define TW_CLI_CTLR_STATE_RESET_IN_PROGRESS		(1<<4)
-/* G133 controller is in 'phase 1' of being reset. */
-#define TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS	(1<<5)
-/* G66 register write access bug needs to be worked around. */
-#define TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED		(1<<6)
-
 /* Possible values of ctlr->ioctl_lock.lock. */
 #define TW_CLI_LOCK_FREE	0x0	/* lock is free */
 #define TW_CLI_LOCK_HELD	0x1	/* lock is held */
@@ -146,7 +130,12 @@ struct tw_cli_ctlr_context {
 	TW_UINT32		device_id;	/* controller device id */
 	TW_UINT32		arch_id;	/* controller architecture id */
-	TW_UINT32		state;	/* controller state */
+	TW_UINT8		active;	/* Initialization done, and controller is active. */
+	TW_UINT8		interrupts_enabled;	/* Interrupts on controller enabled. */
+	TW_UINT8		internal_req_busy;	/* Data buffer for internal requests in use. */
+	TW_UINT8		get_more_aens;	/* More AEN's need to be retrieved. */
+	TW_UINT8		reset_in_progress;	/* Controller is being reset. */
+	TW_UINT8		reset_phase1_in_progress;	/* In 'phase 1' of reset.
*/ TW_UINT32 flags; /* controller settings */ TW_UINT32 sg_size_factor; /* SG element size should be a multiple of this */ @@ -199,10 +188,6 @@ struct tw_cli_ctlr_context { submission */ TW_LOCK_HANDLE *io_lock;/* ptr to lock held during cmd submission */ - TW_LOCK_HANDLE intr_lock_handle;/* lock held during - ISR/response intr processing */ - TW_LOCK_HANDLE *intr_lock;/* ptr to lock held during ISR/ - response intr processing */ #ifdef TW_OSL_CAN_SLEEP TW_SLEEP_HANDLE sleep_handle; /* handle to co-ordinate sleeps Modified: head/sys/dev/twa/tw_cl_fwif.h ============================================================================== --- head/sys/dev/twa/tw_cl_fwif.h Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_fwif.h Wed Jun 9 21:40:38 2010 (r208969) @@ -89,7 +89,6 @@ #define TWA_STATUS_MINOR_VERSION_MASK 0x0F000000 #define TWA_STATUS_MAJOR_VERSION_MASK 0xF0000000 -#define TWA_STATUS_EXPECTED_BITS 0x00002000 #define TWA_STATUS_UNEXPECTED_BITS 0x00F00000 @@ -142,7 +141,7 @@ #define TWA_BASE_FW_SRL 24 #define TWA_BASE_FW_BRANCH 0 #define TWA_BASE_FW_BUILD 1 -#define TWA_CURRENT_FW_SRL 30 +#define TWA_CURRENT_FW_SRL 41 #define TWA_CURRENT_FW_BRANCH_9K 4 #define TWA_CURRENT_FW_BUILD_9K 8 #define TWA_CURRENT_FW_BRANCH_9K_X 8 Modified: head/sys/dev/twa/tw_cl_init.c ============================================================================== --- head/sys/dev/twa/tw_cl_init.c Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_init.c Wed Jun 9 21:40:38 2010 (r208969) @@ -208,7 +208,7 @@ tw_cl_get_mem_requirements(struct tw_cl_ */ *non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) + - (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) + + (sizeof(struct tw_cli_req_context) * max_simult_reqs) + (sizeof(struct tw_cl_event_packet) * max_aens); @@ -220,7 +220,7 @@ tw_cl_get_mem_requirements(struct tw_cl_ */ *dma_mem_size = (sizeof(struct tw_cl_command_packet) * - (max_simult_reqs + 1)) + (TW_CLI_SECTOR_SIZE); + (max_simult_reqs)) + (TW_CLI_SECTOR_SIZE); return(0); } @@ -287,12 +287,12 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle } tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) + - (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) + + (sizeof(struct tw_cli_req_context) * max_simult_reqs) + (sizeof(struct tw_cl_event_packet) * max_aens)); tw_osl_memzero(dma_mem, (sizeof(struct tw_cl_command_packet) * - (max_simult_reqs + 1)) + + max_simult_reqs) + TW_CLI_SECTOR_SIZE); free_non_dma_mem = (TW_UINT8 *)non_dma_mem; @@ -307,7 +307,7 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle ctlr->arch_id = TWA_ARCH_ID(device_id); ctlr->flags = flags; ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id); - ctlr->max_simult_reqs = max_simult_reqs + 1; + ctlr->max_simult_reqs = max_simult_reqs; ctlr->max_aens_supported = max_aens; /* Initialize queues of CL internal request context packets. */ @@ -321,55 +321,23 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock); ctlr->io_lock = &(ctlr->io_lock_handle); tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock); - /* - * If 64 bit cmd pkt addresses are used, we will need to serialize - * writes to the hardware (across registers), since existing (G66) - * hardware will get confused if, for example, we wrote the low 32 bits - * of the cmd pkt address, followed by a response interrupt mask to the - * control register, followed by the high 32 bits of the cmd pkt - * address. 
It will then interpret the value written to the control - * register as the low cmd pkt address. So, for this case, we will - * make a note that we will need to synchronize control register writes - * with command register writes. - */ - if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) && - ((ctlr->device_id == TW_CL_DEVICE_ID_9K) || - (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) || - (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) || - (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) { - ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED; - ctlr->intr_lock = ctlr->io_lock; - } else { - ctlr->intr_lock = &(ctlr->intr_lock_handle); - tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock", - ctlr->intr_lock); - } /* Initialize CL internal request context packets. */ ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem; free_non_dma_mem += (sizeof(struct tw_cli_req_context) * - ( - max_simult_reqs + - 1)); + max_simult_reqs); ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem; ctlr->cmd_pkt_phys = dma_mem_phys; ctlr->internal_req_data = (TW_UINT8 *) (ctlr->cmd_pkt_buf + - ( - max_simult_reqs + - 1)); + max_simult_reqs); ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys + (sizeof(struct tw_cl_command_packet) * - ( - max_simult_reqs + - 1)); - - for (i = 0; - i < ( - max_simult_reqs + - 1); i++) { + max_simult_reqs); + + for (i = 0; i < max_simult_reqs; i++) { req = &(ctlr->req_ctxt_buf[i]); req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]); @@ -421,8 +389,8 @@ start_ctlr: /* Notify some info about the controller to the OSL. */ tw_cli_notify_ctlr_info(ctlr); - /* Mark the controller as active. */ - ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE; + /* Mark the controller active. */ + ctlr->active = TW_CL_TRUE; return(error); } @@ -597,7 +565,7 @@ tw_cl_shutdown_ctlr(struct tw_cl_ctlr_ha * Mark the controller as inactive, disable any further interrupts, * and notify the controller that we are going down. */ - ctlr->state &= ~TW_CLI_CTLR_STATE_ACTIVE; + ctlr->active = TW_CL_FALSE; tw_cli_disable_interrupts(ctlr); @@ -617,8 +585,6 @@ tw_cl_shutdown_ctlr(struct tw_cl_ctlr_ha /* Destroy all locks used by CL. */ tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock); tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock); - if (!(ctlr->flags & TW_CL_64BIT_ADDRESSES)) - tw_osl_destroy_lock(ctlr_handle, ctlr->intr_lock); ret: return(error); Modified: head/sys/dev/twa/tw_cl_intr.c ============================================================================== --- head/sys/dev/twa/tw_cl_intr.c Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_intr.c Wed Jun 9 21:40:38 2010 (r208969) @@ -75,22 +75,16 @@ tw_cl_interrupt(struct tw_cl_ctlr_handle if (ctlr == NULL) goto out; - /* If we get an interrupt while resetting, it is a shared - one for another device, so just bail */ - if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) - goto out; - /* - * Synchronize access between writes to command and control registers - * in 64-bit environments, on G66. + * Bail If we get an interrupt while resetting, or shutting down. */ - if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED) - tw_osl_get_lock(ctlr_handle, ctlr->io_lock); + if (ctlr->reset_in_progress || !(ctlr->active)) + goto out; /* Read the status register to determine the type of interrupt. */ status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle); if (tw_cli_check_ctlr_state(ctlr, status_reg)) - goto out_unlock; + goto out; /* Clear the interrupt. 
*/ if (status_reg & TWA_STATUS_HOST_INTERRUPT) { @@ -98,36 +92,30 @@ tw_cl_interrupt(struct tw_cl_ctlr_handle "Host interrupt"); TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, TWA_CONTROL_CLEAR_HOST_INTERRUPT); - ctlr->host_intr_pending = 0; /* we don't use this */ - rc |= TW_CL_FALSE; /* don't request for a deferred isr call */ } if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) { tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(), "Attention interrupt"); + rc |= TW_CL_TRUE; /* request for a deferred isr call */ + tw_cli_process_attn_intr(ctlr); TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT); - ctlr->attn_intr_pending = 1; - rc |= TW_CL_TRUE; /* request for a deferred isr call */ } if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) { tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(), "Command interrupt"); - TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, - TWA_CONTROL_MASK_COMMAND_INTERRUPT); - ctlr->cmd_intr_pending = 1; rc |= TW_CL_TRUE; /* request for a deferred isr call */ + tw_cli_process_cmd_intr(ctlr); + if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL) + TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, + TWA_CONTROL_MASK_COMMAND_INTERRUPT); } if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) { tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "Response interrupt"); - TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, - TWA_CONTROL_MASK_RESPONSE_INTERRUPT); - ctlr->resp_intr_pending = 1; rc |= TW_CL_TRUE; /* request for a deferred isr call */ + tw_cli_process_resp_intr(ctlr); } -out_unlock: - if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED) - tw_osl_free_lock(ctlr_handle, ctlr->io_lock); out: return(rc); } @@ -135,52 +123,6 @@ out: /* - * Function name: tw_cl_deferred_interrupt - * Description: Deferred interrupt handler. Does most of the processing - * related to an interrupt. - * - * Input: ctlr_handle -- controller handle - * Output: None - * Return value: None - */ -TW_VOID -tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle) -{ - struct tw_cli_ctlr_context *ctlr = - (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); - - tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered"); - - /* Dispatch based on the kind of interrupt. */ - if (ctlr->host_intr_pending) { - tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(), - "Processing Host interrupt"); - ctlr->host_intr_pending = 0; - tw_cli_process_host_intr(ctlr); - } - if (ctlr->attn_intr_pending) { - tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(), - "Processing Attention interrupt"); - ctlr->attn_intr_pending = 0; - tw_cli_process_attn_intr(ctlr); - } - if (ctlr->cmd_intr_pending) { - tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(), - "Processing Command interrupt"); - ctlr->cmd_intr_pending = 0; - tw_cli_process_cmd_intr(ctlr); - } - if (ctlr->resp_intr_pending) { - tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), - "Processing Response interrupt"); - ctlr->resp_intr_pending = 0; - tw_cli_process_resp_intr(ctlr); - } -} - - - -/* * Function name: tw_cli_process_host_intr * Description: This function gets called if we triggered an interrupt. * We don't use it as of now. @@ -248,12 +190,6 @@ tw_cli_process_cmd_intr(struct tw_cli_ct { tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); - /* - * Let the OS Layer submit any requests in its pending queue, - * if it has one. - */ - tw_osl_ctlr_ready(ctlr->ctlr_handle); - /* Start any requests that might be in the pending queue. 
*/ tw_cli_submit_pending_queue(ctlr); @@ -286,9 +222,6 @@ tw_cli_process_resp_intr(struct tw_cli_c tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); - /* Serialize access to the controller response queue. */ - tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock); - for (;;) { status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle); if ((error = tw_cli_check_ctlr_state(ctlr, status_reg))) @@ -315,7 +248,6 @@ tw_cli_process_resp_intr(struct tw_cli_c #ifdef TW_OSL_DEBUG tw_cl_print_ctlr_stats(ctlr->ctlr_handle); #endif /* TW_OSL_DEBUG */ - tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock); tw_cl_reset_ctlr(ctlr->ctlr_handle); return(TW_OSL_EIO); } @@ -330,12 +262,6 @@ tw_cli_process_resp_intr(struct tw_cli_c } - /* Unmask the response interrupt. */ - TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle, - TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT); - - tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock); - /* Complete this, and other requests in the complete queue. */ tw_cli_process_complete_queue(ctlr); @@ -614,12 +540,11 @@ tw_cli_param_callback(struct tw_cli_req_ "status = %d", cmd->param.status); } - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); - if ((ctlr->state & TW_CLI_CTLR_STATE_GET_MORE_AENS) && - (!(ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS))) { - ctlr->state &= ~TW_CLI_CTLR_STATE_GET_MORE_AENS; + if ((ctlr->get_more_aens) && (!(ctlr->reset_in_progress))) { + ctlr->get_more_aens = TW_CL_FALSE; tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "Fetching more AEN's"); if ((error = tw_cli_get_aen(ctlr))) @@ -677,7 +602,7 @@ tw_cli_aen_callback(struct tw_cli_req_co } if (error) { - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); return; } @@ -688,7 +613,7 @@ tw_cli_aen_callback(struct tw_cli_req_co aen_code = tw_cli_manage_aen(ctlr, req); if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) { - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); if (aen_code != TWA_AEN_QUEUE_EMPTY) if ((error = tw_cli_get_aen(ctlr))) @@ -736,25 +661,25 @@ tw_cli_manage_aen(struct tw_cli_ctlr_con * Free the internal req pkt right here, since * tw_cli_set_param will need it. */ - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); /* * We will use a callback in tw_cli_set_param only when * interrupts are enabled and we can expect our callback - * to get called. Setting the TW_CLI_CTLR_STATE_GET_MORE_AENS + * to get called. Setting the get_more_aens * flag will make the callback continue to try to retrieve * more AEN's. */ - if (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED) - ctlr->state |= TW_CLI_CTLR_STATE_GET_MORE_AENS; + if (ctlr->interrupts_enabled) + ctlr->get_more_aens = TW_CL_TRUE; /* Calculate time (in seconds) since last Sunday 12.00 AM. */ local_time = tw_osl_get_local_time(); sync_time = (local_time - (3 * 86400)) % 604800; if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE, TWA_PARAM_TIME_SCHED_TIME, 4, &sync_time, - (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED) + (ctlr->interrupts_enabled) ? 
tw_cli_param_callback : TW_CL_NULL))) tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR, @@ -799,7 +724,7 @@ tw_cli_enable_interrupts(struct tw_cli_c { tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); - ctlr->state |= TW_CLI_CTLR_STATE_INTR_ENABLED; + ctlr->interrupts_enabled = TW_CL_TRUE; TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle, TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT | TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT | @@ -823,6 +748,6 @@ tw_cli_disable_interrupts(struct tw_cli_ TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle, TWA_CONTROL_DISABLE_INTERRUPTS); - ctlr->state &= ~TW_CLI_CTLR_STATE_INTR_ENABLED; + ctlr->interrupts_enabled = TW_CL_FALSE; } Modified: head/sys/dev/twa/tw_cl_io.c ============================================================================== --- head/sys/dev/twa/tw_cl_io.c Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_io.c Wed Jun 9 21:40:38 2010 (r208969) @@ -49,6 +49,10 @@ #include "tw_cl_externs.h" #include "tw_osl_ioctl.h" +#include +#include +#include + /* @@ -76,11 +80,9 @@ tw_cl_start_io(struct tw_cl_ctlr_handle ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); - if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) { + if (ctlr->reset_in_progress) { tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), - "I/O during reset: returning busy. Ctlr state = 0x%x", - ctlr->state); - tw_osl_ctlr_busy(ctlr_handle, req_handle); + "I/O during reset: returning busy."); return(TW_OSL_EBUSY); } @@ -101,7 +103,6 @@ tw_cl_start_io(struct tw_cl_ctlr_handle )) == TW_CL_NULL) { tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "Out of request context packets: returning busy"); - tw_osl_ctlr_busy(ctlr_handle, req_handle); return(TW_OSL_EBUSY); } @@ -171,7 +172,6 @@ tw_cli_submit_cmd(struct tw_cli_req_cont struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle; TW_UINT32 status_reg; TW_INT32 error; - TW_UINT8 notify_osl_of_ctlr_busy = TW_CL_FALSE; tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered"); @@ -208,10 +208,13 @@ tw_cli_submit_cmd(struct tw_cli_req_cont req->state = TW_CLI_REQ_STATE_PENDING; tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q); error = 0; + /* Unmask command interrupt. */ + TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, + TWA_CONTROL_UNMASK_COMMAND_INTERRUPT); } else error = TW_OSL_EBUSY; } else { - notify_osl_of_ctlr_busy = TW_CL_TRUE; + tw_osl_ctlr_busy(ctlr_handle, req->req_handle); error = TW_OSL_EBUSY; } } else { @@ -246,25 +249,6 @@ tw_cli_submit_cmd(struct tw_cli_req_cont out: tw_osl_free_lock(ctlr_handle, ctlr->io_lock); - if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) { - if (notify_osl_of_ctlr_busy) - tw_osl_ctlr_busy(ctlr_handle, req->req_handle); - - /* - * Synchronize access between writes to command and control - * registers in 64-bit environments, on G66. - */ - if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED) - tw_osl_get_lock(ctlr_handle, ctlr->io_lock); - - /* Unmask command interrupt. */ - TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, - TWA_CONTROL_UNMASK_COMMAND_INTERRUPT); - - if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED) - tw_osl_free_lock(ctlr_handle, ctlr->io_lock); - } - return(error); } @@ -299,12 +283,9 @@ tw_cl_fw_passthru(struct tw_cl_ctlr_hand ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); - if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) { + if (ctlr->reset_in_progress) { tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), - "Passthru request during reset: returning busy. 
" - "Ctlr state = 0x%x", - ctlr->state); - tw_osl_ctlr_busy(ctlr_handle, req_handle); + "Passthru request during reset: returning busy."); return(TW_OSL_EBUSY); } @@ -312,7 +293,6 @@ tw_cl_fw_passthru(struct tw_cl_ctlr_hand )) == TW_CL_NULL) { tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "Out of request context packets: returning busy"); - tw_osl_ctlr_busy(ctlr_handle, req_handle); return(TW_OSL_EBUSY); } @@ -759,11 +739,11 @@ tw_cli_get_param(struct tw_cli_ctlr_cont goto out; /* Make sure this is the only CL internal request at this time. */ - if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) { + if (ctlr->internal_req_busy) { error = TW_OSL_EBUSY; goto out; } - ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_TRUE; req->data = ctlr->internal_req_data; req->data_phys = ctlr->internal_req_data_phys; req->length = TW_CLI_SECTOR_SIZE; @@ -821,7 +801,7 @@ tw_cli_get_param(struct tw_cli_ctlr_cont goto out; } tw_osl_memcpy(param_data, param->data, param_size); - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } else { /* There's a call back. Simply submit the command. */ @@ -838,7 +818,7 @@ out: "get_param failed", "error = %d", error); if (param) - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; if (req) tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); return(1); @@ -878,11 +858,11 @@ tw_cli_set_param(struct tw_cli_ctlr_cont goto out; /* Make sure this is the only CL internal request at this time. */ - if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) { + if (ctlr->internal_req_busy) { error = TW_OSL_EBUSY; goto out; } - ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_TRUE; req->data = ctlr->internal_req_data; req->data_phys = ctlr->internal_req_data_phys; req->length = TW_CLI_SECTOR_SIZE; @@ -939,7 +919,7 @@ tw_cli_set_param(struct tw_cli_ctlr_cont &(req->cmd_pkt->cmd_hdr)); goto out; } - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } else { /* There's a call back. Simply submit the command. */ @@ -956,7 +936,7 @@ out: "set_param failed", "error = %d", error); if (param) - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; if (req) tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); return(error); @@ -1054,8 +1034,11 @@ tw_cli_submit_and_poll_request(struct tw * taking care of it). */ tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q); + if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL) + TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle, + TWA_CONTROL_MASK_COMMAND_INTERRUPT); if (req->data) - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } @@ -1079,12 +1062,16 @@ tw_cl_reset_ctlr(struct tw_cl_ctlr_handl { struct tw_cli_ctlr_context *ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); + struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt; TW_INT32 reset_attempt = 1; TW_INT32 error; tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered"); - ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS; + ctlr->reset_in_progress = TW_CL_TRUE; + xpt_freeze_simq(sc->sim, 1); + + tw_cli_disable_interrupts(ctlr); /* * Error back all requests in the complete, busy, and pending queues. 
@@ -1098,8 +1085,8 @@ tw_cl_reset_ctlr(struct tw_cl_ctlr_handl tw_cli_drain_complete_queue(ctlr); tw_cli_drain_busy_queue(ctlr); tw_cli_drain_pending_queue(ctlr); - - tw_cli_disable_interrupts(ctlr); + ctlr->internal_req_busy = TW_CL_FALSE; + ctlr->get_more_aens = TW_CL_FALSE; /* Soft reset the controller. */ try_reset: @@ -1135,7 +1122,9 @@ try_reset: " "); out: - ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS; + ctlr->reset_in_progress = TW_CL_FALSE; + xpt_release_simq(sc->sim, 1); + /* * Enable interrupts, and also clear attention and response interrupts. */ @@ -1163,6 +1152,8 @@ tw_cli_soft_reset(struct tw_cli_ctlr_con { struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle; TW_UINT32 status_reg; + int found; + int loop_count; TW_UINT32 error; tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered"); @@ -1192,12 +1183,27 @@ tw_cli_soft_reset(struct tw_cli_ctlr_con * make sure we don't access any hardware registers (for * polling) during that window. */ - ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS; - while (tw_cli_find_response(ctlr, - TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS) + ctlr->reset_phase1_in_progress = TW_CL_TRUE; + loop_count = 0; + do { + found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS); tw_osl_delay(10); + loop_count++; + error = 0x7888; + } while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */ + + if (!found) { + tw_cl_create_event(ctlr_handle, TW_CL_TRUE, + TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT, + 0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING, + "Missed firmware handshake after soft-reset", + "error = %d", error); + tw_osl_free_lock(ctlr_handle, ctlr->io_lock); + return(error); + } + tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000); - ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS; + ctlr->reset_phase1_in_progress = TW_CL_FALSE; } if ((error = tw_cli_poll_status(ctlr, @@ -1285,9 +1291,9 @@ tw_cli_send_scsi_cmd(struct tw_cli_req_c tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); /* Make sure this is the only CL internal request at this time. 
*/ - if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) + if (ctlr->internal_req_busy) return(TW_OSL_EBUSY); - ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_TRUE; req->data = ctlr->internal_req_data; req->data_phys = ctlr->internal_req_data_phys; tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE); @@ -1365,7 +1371,7 @@ tw_cli_get_aen(struct tw_cli_ctlr_contex "Could not send SCSI command", "request = %p, error = %d", req, error); if (req->data) - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } return(error); Modified: head/sys/dev/twa/tw_cl_misc.c ============================================================================== --- head/sys/dev/twa/tw_cl_misc.c Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_misc.c Wed Jun 9 21:40:38 2010 (r208969) @@ -368,14 +368,14 @@ tw_cli_drain_aen_queue(struct tw_cli_ctl if (aen_code == TWA_AEN_SYNC_TIME_WITH_HOST) continue; - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } out: if (req) { if (req->data) - ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY; + ctlr->internal_req_busy = TW_CL_FALSE; tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q); } return(error); @@ -447,34 +447,7 @@ tw_cli_poll_status(struct tw_cli_ctlr_co /* got the required bit(s) */ return(TW_OSL_ESUCCESS); - /* - * The OSL should not define TW_OSL_CAN_SLEEP if it calls - * tw_cl_deferred_interrupt from within the ISR and not a - * lower interrupt level, since, in that case, we might end - * up here, and try to sleep (within an ISR). - */ -#ifndef TW_OSL_CAN_SLEEP - /* OSL doesn't support sleeping; will spin. */ tw_osl_delay(1000); -#else /* TW_OSL_CAN_SLEEP */ -#if 0 - /* Will spin if initializing, sleep otherwise. */ - if (!(ctlr->state & TW_CLI_CTLR_STATE_ACTIVE)) - tw_osl_delay(1000); - else - tw_osl_sleep(ctlr->ctlr_handle, - &(ctlr->sleep_handle), 1 /* ms */); -#else /* #if 0 */ - /* - * Will always spin for now (since reset holds a spin lock). - * We could free io_lock after the call to TW_CLI_SOFT_RESET, - * so we could sleep here. To block new requests (since - * the lock will have been released) we could use the - * ...RESET_IN_PROGRESS flag. Need to revisit. - */ - tw_osl_delay(1000); -#endif /* #if 0 */ -#endif /* TW_OSL_CAN_SLEEP */ } while (tw_osl_get_local_time() <= end_time); return(TW_OSL_ETIMEDOUT); @@ -736,22 +709,20 @@ tw_cli_check_ctlr_state(struct tw_cli_ct tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); /* Check if the 'micro-controller ready' bit is not set. 
*/ - if ((status_reg & TWA_STATUS_EXPECTED_BITS) != - TWA_STATUS_EXPECTED_BITS) { + if (!(status_reg & TWA_STATUS_MICROCONTROLLER_READY)) { TW_INT8 desc[200]; tw_osl_memzero(desc, 200); - if ((status_reg & TWA_STATUS_MICROCONTROLLER_READY) || - (!(ctlr->state & - TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS))) { + if (!(ctlr->reset_phase1_in_progress)) { tw_cl_create_event(ctlr_handle, TW_CL_TRUE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT, 0x1301, 0x1, TW_CL_SEVERITY_ERROR_STRING, "Missing expected status bit(s)", "status reg = 0x%x; Missing bits: %s", status_reg, - tw_cli_describe_bits (~status_reg & - TWA_STATUS_EXPECTED_BITS, desc)); + tw_cli_describe_bits( + TWA_STATUS_MICROCONTROLLER_READY, + desc)); error = TW_OSL_EGENFAILURE; } } @@ -765,7 +736,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ct /* Skip queue error msgs during 9650SE/9690SA reset */ if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) && (ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) || - ((ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) == 0) || + (!(ctlr->reset_in_progress)) || ((status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) == 0)) tw_cl_create_event(ctlr_handle, TW_CL_TRUE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT, @@ -819,7 +790,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ct /* Skip queue error msgs during 9650SE/9690SA reset */ if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) && (ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) || - ((ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) == 0)) + (!(ctlr->reset_in_progress))) tw_cl_create_event(ctlr_handle, TW_CL_TRUE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT, 0x1305, 0x1, TW_CL_SEVERITY_ERROR_STRING, @@ -839,7 +810,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ct "status reg = 0x%x %s", status_reg, tw_cli_describe_bits(status_reg, desc)); - error = TW_OSL_EGENFAILURE; + error = TW_OSL_EGENFAILURE; // tw_cl_reset_ctlr(ctlr_handle); } } return(error); Modified: head/sys/dev/twa/tw_cl_share.h ============================================================================== --- head/sys/dev/twa/tw_cl_share.h Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_cl_share.h Wed Jun 9 21:40:38 2010 (r208969) @@ -76,7 +76,7 @@ * of supporting only 255, since we want to keep one CL internal request * context packet always available for internal requests. */ -#define TW_CL_MAX_SIMULTANEOUS_REQUESTS 0xFF /* max simult reqs supported */ +#define TW_CL_MAX_SIMULTANEOUS_REQUESTS 256 /* max simult reqs supported */ #define TW_CL_MAX_32BIT_SG_ELEMENTS 109 /* max 32-bit sg elements */ #define TW_CL_MAX_64BIT_SG_ELEMENTS 72 /* max 64-bit sg elements */ @@ -144,6 +144,7 @@ struct tw_cl_ctlr_handle { struct tw_cl_req_handle { TW_VOID *osl_req_ctxt; /* OSL's request context */ TW_VOID *cl_req_ctxt; /* CL's request context */ + TW_UINT8 is_io; /* Only freeze/release simq for IOs */ }; @@ -353,12 +354,6 @@ extern TW_VOID tw_osl_ctlr_busy(struct t #endif -#ifndef tw_osl_ctlr_ready -/* Called on cmd interrupt. Allows re-submission of any pending requests. */ -extern TW_VOID tw_osl_ctlr_ready(struct tw_cl_ctlr_handle *ctlr_handle); -#endif - - #ifndef tw_osl_cur_func /* Text name of current function. */ extern TW_INT8 *tw_osl_cur_func(TW_VOID); @@ -528,10 +523,6 @@ extern TW_VOID tw_cl_create_event(struct extern TW_INT32 tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id); -/* Deferred interrupt handler. */ -extern TW_VOID tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle); - - /* Submit a firmware cmd packet. 
*/ extern TW_INT32 tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle, struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle); Modified: head/sys/dev/twa/tw_osl.h ============================================================================== --- head/sys/dev/twa/tw_osl.h Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_osl.h Wed Jun 9 21:40:38 2010 (r208969) @@ -50,13 +50,11 @@ #define TW_OSLI_DEVICE_NAME "3ware 9000 series Storage Controller" #define TW_OSLI_MALLOC_CLASS M_TWA -#define TW_OSLI_MAX_NUM_IOS TW_CL_MAX_SIMULTANEOUS_REQUESTS +#define TW_OSLI_MAX_NUM_REQUESTS TW_CL_MAX_SIMULTANEOUS_REQUESTS +/* Reserve two command packets. One for ioctls and one for AENs */ +#define TW_OSLI_MAX_NUM_IOS (TW_OSLI_MAX_NUM_REQUESTS - 2) #define TW_OSLI_MAX_NUM_AENS 0x100 -/* Disabled, doesn't work yet. -#define TW_OSLI_DEFERRED_INTR_USED -*/ - #ifdef PAE #define TW_OSLI_DMA_BOUNDARY (1u << 31) #else @@ -80,10 +78,6 @@ #define TW_OSLI_REQ_FLAGS_PASSTHRU (1<<5) /* pass through request */ #define TW_OSLI_REQ_FLAGS_SLEEPING (1<<6) /* owner sleeping on this cmd */ -/* Possible values of sc->state. */ -#define TW_OSLI_CTLR_STATE_OPEN (1<<0) /* control device is open */ -#define TW_OSLI_CTLR_STATE_SIMQ_FROZEN (1<<1) /* simq frozen */ - #ifdef TW_OSL_DEBUG struct tw_osli_q_stats { @@ -101,6 +95,8 @@ struct tw_osli_q_stats { /* Driver's request packet. */ struct tw_osli_req_context { struct tw_cl_req_handle req_handle;/* tag to track req b/w OSL & CL */ + struct mtx ioctl_wake_timeout_lock_handle;/* non-spin lock used to detect ioctl timeout */ + struct mtx *ioctl_wake_timeout_lock;/* ptr to above lock */ struct twa_softc *ctlr; /* ptr to OSL's controller context */ TW_VOID *data; /* ptr to data being passed to CL */ TW_UINT32 length; /* length of buf being passed to CL */ @@ -130,10 +126,10 @@ struct tw_osli_req_context { /* Per-controller structure. */ struct twa_softc { struct tw_cl_ctlr_handle ctlr_handle; - struct tw_osli_req_context *req_ctxt_buf; + struct tw_osli_req_context *req_ctx_buf; /* Controller state. */ - TW_UINT32 state; + TW_UINT8 open; TW_UINT32 flags; TW_INT32 device_id; Modified: head/sys/dev/twa/tw_osl_cam.c ============================================================================== --- head/sys/dev/twa/tw_osl_cam.c Wed Jun 9 20:20:24 2010 (r208968) +++ head/sys/dev/twa/tw_osl_cam.c Wed Jun 9 21:40:38 2010 (r208969) @@ -81,7 +81,7 @@ tw_osli_cam_attach(struct twa_softc *sc) /* * Create the device queue for our SIM. */ - if ((devq = cam_simq_alloc(TW_OSLI_MAX_NUM_IOS)) == NULL) { + if ((devq = cam_simq_alloc(TW_OSLI_MAX_NUM_REQUESTS)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, @@ -92,15 +92,15 @@ tw_osli_cam_attach(struct twa_softc *sc) } /* - * Create a SIM entry. Though we can support TW_OSLI_MAX_NUM_IOS + * Create a SIM entry. Though we can support TW_OSLI_MAX_NUM_REQUESTS * simultaneous requests, we claim to be able to handle only - * (TW_OSLI_MAX_NUM_IOS - 1), so that we always have a request - * packet available to service ioctls. + * TW_OSLI_MAX_NUM_IOS (two less), so that we always have a request + * packet available to service ioctls and AENs. 
*/ tw_osli_dbg_dprintf(3, sc, "Calling cam_sim_alloc"); sc->sim = cam_sim_alloc(twa_action, twa_poll, "twa", sc, device_get_unit(sc->bus_dev), sc->sim_lock, - TW_OSLI_MAX_NUM_IOS - 1, 1, devq); + TW_OSLI_MAX_NUM_IOS, 1, devq); if (sc->sim == NULL) { cam_simq_free(devq); tw_osli_printf(sc, "error = %d", @@ -168,14 +168,6 @@ tw_osli_cam_detach(struct twa_softc *sc) { tw_osli_dbg_dprintf(3, sc, "entered"); -#ifdef TW_OSLI_DEFERRED_INTR_USED - /* - drain the taskqueue - Ctrl is already went down so, no more enqueuetask will - happen . Don't hold any locks, that task might need. - */ - - taskqueue_drain(taskqueue_fast, &(sc->deferred_intr_callback)); -#endif mtx_lock(sc->sim_lock); if (sc->path) @@ -236,7 +228,7 @@ tw_osli_execute_scsi(struct tw_osli_req_ TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2105, "Physical CDB address!"); - ccb_h->status = CAM_REQ_CMP_ERR; + ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(1); } *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
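
Below is a minimal, illustrative C sketch of the two xpt_freeze_simq()
conventions described in item 14 of the log above.  It is not part of
r208969; the function name and the "io_command" argument are hypothetical,
and the includes assume a FreeBSD kernel build environment.

/*
 * Illustrative sketch only (not from r208969): the two simq freeze/release
 * patterns described in the commit log.
 */
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

static void
example_simq_freeze(struct cam_sim *sim, union ccb *ccb, int io_command)
{
	if (io_command) {
		/*
		 * Temporarily freezing the simq with an I/O command:
		 * freeze now, and let CAM release the simq when this CCB
		 * completes by flagging CAM_RELEASE_SIMQ in its status.
		 */
		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	} else {
		/*
		 * Otherwise: freeze and release immediately, so the simq
		 * is never left frozen by a non-I/O command.
		 */
		xpt_freeze_simq(sim, 1);
		xpt_release_simq(sim, 1);
	}
}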