From owner-svn-src-user@FreeBSD.ORG Wed Jan 13 08:21:19 2010
Return-Path:
Delivered-To: svn-src-user@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34]) by hub.freebsd.org (Postfix) with ESMTP id B5CB5106566C; Wed, 13 Jan 2010 08:21:19 +0000 (UTC) (envelope-from kmacy@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c]) by mx1.freebsd.org (Postfix) with ESMTP id A43948FC16; Wed, 13 Jan 2010 08:21:19 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1]) by svn.freebsd.org (8.14.3/8.14.3) with ESMTP id o0D8LJIM049378; Wed, 13 Jan 2010 08:21:19 GMT (envelope-from kmacy@svn.freebsd.org)
Received: (from kmacy@localhost) by svn.freebsd.org (8.14.3/8.14.3/Submit) id o0D8LJwV049376; Wed, 13 Jan 2010 08:21:19 GMT (envelope-from kmacy@svn.freebsd.org)
Message-Id: <201001130821.o0D8LJwV049376@svn.freebsd.org>
From: Kip Macy
Date: Wed, 13 Jan 2010 08:21:19 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
X-SVN-Group: user
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r202184 - user/kmacy/releng_8_rump/lib/libunet
X-BeenThere: svn-src-user@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: "SVN commit messages for the experimental " user" src tree"
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Wed, 13 Jan 2010 08:21:19 -0000

Author: kmacy
Date: Wed Jan 13 08:21:19 2010
New Revision: 202184
URL: http://svn.freebsd.org/changeset/base/202184

Log:
  - copy in kern_intr.c as unet_kern_intr.c
  - eliminate cpu binding
  - remove some unneeded references to scheduler code

Modified:
  user/kmacy/releng_8_rump/lib/libunet/unet_kern_intr.c

Modified: user/kmacy/releng_8_rump/lib/libunet/unet_kern_intr.c
==============================================================================
--- user/kmacy/releng_8_rump/lib/libunet/unet_kern_intr.c	Wed Jan 13 07:17:16 2010	(r202183)
+++ user/kmacy/releng_8_rump/lib/libunet/unet_kern_intr.c	Wed Jan 13 08:21:19 2010	(r202184)
@@ -1,3 +1,29 @@
+/*-
+ * Copyright (c) 1997, Stefan Esser
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + #include __FBSDID("$FreeBSD$"); @@ -26,10 +52,249 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include +#include +#include +#include +#include +#ifdef DDB +#include +#include +#endif + + +#include + +/* + * Describe an interrupt thread. There is one of these per interrupt event. + */ +struct intr_thread { + struct intr_event *it_event; + struct thread *it_thread; /* Kernel thread. */ + int it_flags; /* (j) IT_* flags. */ + int it_need; /* Needs service. */ +}; + +/* Interrupt thread flags kept in it_flags */ +#define IT_DEAD 0x000001 /* Thread is waiting to exit. */ + +struct intr_entropy { + struct thread *td; + uintptr_t event; +}; + + +void +critical_enter(void) +{ +/* grab hashed lock */ +} + +void +critical_exit(void) +{ +} + struct intr_event *clk_intr_event; +struct intr_event *tty_intr_event; +void *vm_ih; +struct proc *intrproc; + +static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); + +static int intr_storm_threshold = 1000; +static TAILQ_HEAD(, intr_event) event_list = + TAILQ_HEAD_INITIALIZER(event_list); +static struct mtx event_lock; +MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); + +static void intr_event_update(struct intr_event *ie); +#ifdef INTR_FILTER +static int intr_event_schedule_thread(struct intr_event *ie, + struct intr_thread *ithd); +static int intr_filter_loop(struct intr_event *ie, + struct trapframe *frame, struct intr_thread **ithd); +static struct intr_thread *ithread_create(const char *name, + struct intr_handler *ih); +#else +static int intr_event_schedule_thread(struct intr_event *ie); +static struct intr_thread *ithread_create(const char *name); +#endif +static void ithread_destroy(struct intr_thread *ithread); +static void ithread_execute_handlers(struct proc *p, + struct intr_event *ie); +#ifdef INTR_FILTER +static void priv_ithread_execute_handler(struct proc *p, + struct intr_handler *ih); +#endif +static void ithread_loop(void *); +static void ithread_update(struct intr_thread *ithd); +static void start_softintr(void *); + +/* Map an interrupt type to an ithread priority. */ +u_char +intr_priority(enum intr_type flags) +{ + u_char pri; + + flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | + INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); + switch (flags) { + case INTR_TYPE_TTY: + pri = PI_TTYLOW; + break; + case INTR_TYPE_BIO: + /* + * XXX We need to refine this. BSD/OS distinguishes + * between tape and disk priorities. + */ + pri = PI_DISK; + break; + case INTR_TYPE_NET: + pri = PI_NET; + break; + case INTR_TYPE_CAM: + pri = PI_DISK; /* XXX or PI_CAM? */ + break; + case INTR_TYPE_AV: /* Audio/video */ + pri = PI_AV; + break; + case INTR_TYPE_CLK: + pri = PI_REALTIME; + break; + case INTR_TYPE_MISC: + pri = PI_DULL; /* don't care */ + break; + default: + /* We didn't specify an interrupt level. */ + panic("intr_priority: no interrupt type in flags"); + } + + return pri; +} + +/* + * Update an ithread based on the associated intr_event. + */ +static void +ithread_update(struct intr_thread *ithd) +{ + struct intr_event *ie; + struct thread *td; + u_char pri; + + ie = ithd->it_event; + td = ithd->it_thread; + + /* Determine the overall priority of this event. */ + if (TAILQ_EMPTY(&ie->ie_handlers)) + pri = PRI_MAX_ITHD; + else + pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; + + /* Update name and priority. 
*/ + strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); + + /* XXX set priority */ +} + +/* + * Regenerate the full name of an interrupt event and update its priority. + */ +static void +intr_event_update(struct intr_event *ie) +{ + struct intr_handler *ih; + char *last; + int missed, space; + + /* Start off with no entropy and just the name of the event. */ + mtx_assert(&ie->ie_lock, MA_OWNED); + strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); + ie->ie_flags &= ~IE_ENTROPY; + missed = 0; + space = 1; + + /* Run through all the handlers updating values. */ + TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { + if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < + sizeof(ie->ie_fullname)) { + strcat(ie->ie_fullname, " "); + strcat(ie->ie_fullname, ih->ih_name); + space = 0; + } else + missed++; + if (ih->ih_flags & IH_ENTROPY) + ie->ie_flags |= IE_ENTROPY; + } + /* + * If the handler names were too long, add +'s to indicate missing + * names. If we run out of room and still have +'s to add, change + * the last character from a + to a *. + */ + last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; + while (missed-- > 0) { + if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { + if (*last == '+') { + *last = '*'; + break; + } else + *last = '+'; + } else if (space) { + strcat(ie->ie_fullname, " +"); + space = 0; + } else + strcat(ie->ie_fullname, "+"); + } + + /* + * If this event has an ithread, update it's priority and + * name. + */ + if (ie->ie_thread != NULL) + ithread_update(ie->ie_thread); + CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); +} + +int +intr_event_create(struct intr_event **event, void *source, int flags, int irq, + void (*pre_ithread)(void *), void (*post_ithread)(void *), + void (*post_filter)(void *), int (*assign_cpu)(void *, u_char), + const char *fmt, ...) +{ + struct intr_event *ie; + va_list ap; + + /* The only valid flag during creation is IE_SOFT. */ + if ((flags & ~IE_SOFT) != 0) + return (EINVAL); + ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); + ie->ie_source = source; + ie->ie_pre_ithread = pre_ithread; + ie->ie_post_ithread = post_ithread; + ie->ie_post_filter = post_filter; + ie->ie_assign_cpu = assign_cpu; + ie->ie_flags = flags; + ie->ie_irq = irq; + ie->ie_cpu = NOCPU; + TAILQ_INIT(&ie->ie_handlers); + mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); + + va_start(ap, fmt); + vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); + va_end(ap); + strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); + mtx_lock(&event_lock); + TAILQ_INSERT_TAIL(&event_list, ie, ie_list); + mtx_unlock(&event_lock); + if (event != NULL) + *event = ie; + CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); + return (0); +} /* * Bind an interrupt event to the specified CPU. 
Note that not all @@ -43,9 +308,590 @@ int intr_event_bind(struct intr_event *ie, u_char cpu) { - return (ENOTSUP); + return (EOPNOTSUPP); +} + +static struct intr_event * +intr_lookup(int irq) +{ + struct intr_event *ie; + + mtx_lock(&event_lock); + TAILQ_FOREACH(ie, &event_list, ie_list) + if (ie->ie_irq == irq && + (ie->ie_flags & IE_SOFT) == 0 && + TAILQ_FIRST(&ie->ie_handlers) != NULL) + break; + mtx_unlock(&event_lock); + return (ie); +} + +int +intr_event_destroy(struct intr_event *ie) +{ + + mtx_lock(&event_lock); + mtx_lock(&ie->ie_lock); + if (!TAILQ_EMPTY(&ie->ie_handlers)) { + mtx_unlock(&ie->ie_lock); + mtx_unlock(&event_lock); + return (EBUSY); + } + TAILQ_REMOVE(&event_list, ie, ie_list); +#ifndef notyet + if (ie->ie_thread != NULL) { + ithread_destroy(ie->ie_thread); + ie->ie_thread = NULL; + } +#endif + mtx_unlock(&ie->ie_lock); + mtx_unlock(&event_lock); + mtx_destroy(&ie->ie_lock); + free(ie, M_ITHREAD); + return (0); +} + +#ifndef INTR_FILTER +static struct intr_thread * +ithread_create(const char *name) +{ + struct intr_thread *ithd; + struct thread *td; + int error; + + ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); + + error = kproc_kthread_add(ithread_loop, ithd, &intrproc, + &td, RFSTOPPED | RFHIGHPID, + 0, "intr", "%s", name); + if (error) + panic("kproc_create() failed with %d", error); + td->td_pflags |= TDP_ITHREAD; + ithd->it_thread = td; + CTR2(KTR_INTR, "%s: created %s", __func__, name); + return (ithd); +} +#else +static struct intr_thread * +ithread_create(const char *name, struct intr_handler *ih) +{ + struct intr_thread *ithd; + struct thread *td; + int error; + + ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); + + error = kproc_kthread_add(ithread_loop, ih, &intrproc, + &td, RFSTOPPED | RFHIGHPID, + 0, "intr", "%s", name); + if (error) + panic("kproc_create() failed with %d", error); + td->td_pflags |= TDP_ITHREAD; + ithd->it_thread = td; + CTR2(KTR_INTR, "%s: created %s", __func__, name); + return (ithd); +} +#endif + +static void +ithread_destroy(struct intr_thread *ithread) +{ + ; + +} + +#ifndef INTR_FILTER +int +intr_event_add_handler(struct intr_event *ie, const char *name, + driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, + enum intr_type flags, void **cookiep) +{ + struct intr_handler *ih, *temp_ih; + struct intr_thread *it; + + if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) + return (EINVAL); + + /* Allocate and populate an interrupt handler structure. */ + ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); + ih->ih_filter = filter; + ih->ih_handler = handler; + ih->ih_argument = arg; + ih->ih_name = name; + ih->ih_event = ie; + ih->ih_pri = pri; + if (flags & INTR_EXCL) + ih->ih_flags = IH_EXCLUSIVE; + if (flags & INTR_MPSAFE) + ih->ih_flags |= IH_MPSAFE; + if (flags & INTR_ENTROPY) + ih->ih_flags |= IH_ENTROPY; + + /* We can only have one exclusive handler in a event. */ + mtx_lock(&ie->ie_lock); + if (!TAILQ_EMPTY(&ie->ie_handlers)) { + if ((flags & INTR_EXCL) || + (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { + mtx_unlock(&ie->ie_lock); + free(ih, M_ITHREAD); + return (EINVAL); + } + } + + /* Add the new handler to the event in priority order. 
*/ + TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { + if (temp_ih->ih_pri > ih->ih_pri) + break; + } + if (temp_ih == NULL) + TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); + else + TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); + intr_event_update(ie); + + /* Create a thread if we need one. */ + while (ie->ie_thread == NULL && handler != NULL) { + if (ie->ie_flags & IE_ADDING_THREAD) + msleep(ie, &ie->ie_lock, 0, "ithread", 0); + else { + ie->ie_flags |= IE_ADDING_THREAD; + mtx_unlock(&ie->ie_lock); + it = ithread_create("intr: newborn"); + mtx_lock(&ie->ie_lock); + ie->ie_flags &= ~IE_ADDING_THREAD; + ie->ie_thread = it; + it->it_event = ie; + ithread_update(it); + wakeup(ie); + } + } + CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, + ie->ie_name); + mtx_unlock(&ie->ie_lock); + + if (cookiep != NULL) + *cookiep = ih; + return (0); +} +#else +int +intr_event_add_handler(struct intr_event *ie, const char *name, + driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, + enum intr_type flags, void **cookiep) +{ + struct intr_handler *ih, *temp_ih; + struct intr_thread *it; + + if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) + return (EINVAL); + + /* Allocate and populate an interrupt handler structure. */ + ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); + ih->ih_filter = filter; + ih->ih_handler = handler; + ih->ih_argument = arg; + ih->ih_name = name; + ih->ih_event = ie; + ih->ih_pri = pri; + if (flags & INTR_EXCL) + ih->ih_flags = IH_EXCLUSIVE; + if (flags & INTR_MPSAFE) + ih->ih_flags |= IH_MPSAFE; + if (flags & INTR_ENTROPY) + ih->ih_flags |= IH_ENTROPY; + + /* We can only have one exclusive handler in a event. */ + mtx_lock(&ie->ie_lock); + if (!TAILQ_EMPTY(&ie->ie_handlers)) { + if ((flags & INTR_EXCL) || + (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { + mtx_unlock(&ie->ie_lock); + free(ih, M_ITHREAD); + return (EINVAL); + } + } + + /* Add the new handler to the event in priority order. */ + TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { + if (temp_ih->ih_pri > ih->ih_pri) + break; + } + if (temp_ih == NULL) + TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); + else + TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); + intr_event_update(ie); + + /* For filtered handlers, create a private ithread to run on. */ + if (filter != NULL && handler != NULL) { + mtx_unlock(&ie->ie_lock); + it = ithread_create("intr: newborn", ih); + mtx_lock(&ie->ie_lock); + it->it_event = ie; + ih->ih_thread = it; + ithread_update(it); // XXX - do we really need this?!?!? + } else { /* Create the global per-event thread if we need one. */ + while (ie->ie_thread == NULL && handler != NULL) { + if (ie->ie_flags & IE_ADDING_THREAD) + msleep(ie, &ie->ie_lock, 0, "ithread", 0); + else { + ie->ie_flags |= IE_ADDING_THREAD; + mtx_unlock(&ie->ie_lock); + it = ithread_create("intr: newborn", ih); + mtx_lock(&ie->ie_lock); + ie->ie_flags &= ~IE_ADDING_THREAD; + ie->ie_thread = it; + it->it_event = ie; + ithread_update(it); + wakeup(ie); + } + } + } + CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, + ie->ie_name); + mtx_unlock(&ie->ie_lock); + + if (cookiep != NULL) + *cookiep = ih; + return (0); +} +#endif + +/* + * Return the ie_source field from the intr_event an intr_handler is + * associated with. 
+ */ +void * +intr_handler_source(void *cookie) +{ + struct intr_handler *ih; + struct intr_event *ie; + + ih = (struct intr_handler *)cookie; + if (ih == NULL) + return (NULL); + ie = ih->ih_event; + KASSERT(ie != NULL, + ("interrupt handler \"%s\" has a NULL interrupt event", + ih->ih_name)); + return (ie->ie_source); +} + +#ifndef INTR_FILTER +int +intr_event_remove_handler(void *cookie) +{ + struct intr_handler *handler = (struct intr_handler *)cookie; + struct intr_event *ie; +#ifdef INVARIANTS + struct intr_handler *ih; +#endif +#ifdef notyet + int dead; +#endif + + if (handler == NULL) + return (EINVAL); + ie = handler->ih_event; + KASSERT(ie != NULL, + ("interrupt handler \"%s\" has a NULL interrupt event", + handler->ih_name)); + mtx_lock(&ie->ie_lock); + CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, + ie->ie_name); +#ifdef INVARIANTS + TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) + if (ih == handler) + goto ok; + mtx_unlock(&ie->ie_lock); + panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", + ih->ih_name, ie->ie_name); +ok: +#endif + /* + * If there is no ithread, then just remove the handler and return. + * XXX: Note that an INTR_FAST handler might be running on another + * CPU! + */ + if (ie->ie_thread == NULL) { + TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); + mtx_unlock(&ie->ie_lock); + free(handler, M_ITHREAD); + return (0); + } + + /* + * If the interrupt thread is already running, then just mark this + * handler as being dead and let the ithread do the actual removal. + * + * During a cold boot while cold is set, msleep() does not sleep, + * so we have to remove the handler here rather than letting the + * thread do it. + */ + thread_lock(ie->ie_thread->it_thread); + if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { + handler->ih_flags |= IH_DEAD; + + /* + * Ensure that the thread will process the handler list + * again and remove this handler if it has already passed + * it on the list. + */ + ie->ie_thread->it_need = 1; + } else + TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); + thread_unlock(ie->ie_thread->it_thread); + while (handler->ih_flags & IH_DEAD) + msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); + intr_event_update(ie); +#ifdef notyet + /* + * XXX: This could be bad in the case of ppbus(8). Also, I think + * this could lead to races of stale data when servicing an + * interrupt. + */ + dead = 1; + TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { + if (!(ih->ih_flags & IH_FAST)) { + dead = 0; + break; + } + } + if (dead) { + ithread_destroy(ie->ie_thread); + ie->ie_thread = NULL; + } +#endif + mtx_unlock(&ie->ie_lock); + free(handler, M_ITHREAD); + return (0); +} + +static int +intr_event_schedule_thread(struct intr_event *ie) +{ + struct intr_entropy entropy; + struct intr_thread *it; + struct thread *td; + struct thread *ctd; + struct proc *p; + + /* + * If no ithread or no handlers, then we have a stray interrupt. + */ + if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || + ie->ie_thread == NULL) + return (EINVAL); + + ctd = curthread; + it = ie->ie_thread; + td = it->it_thread; + p = td->td_proc; + + /* + * If any of the handlers for this ithread claim to be good + * sources of entropy, then gather some. 
+ */ + if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { + CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, + p->p_pid, td->td_name); + entropy.event = (uintptr_t)ie; + entropy.td = ctd; + random_harvest(&entropy, sizeof(entropy), 2, 0, + RANDOM_INTERRUPT); + } + + KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); + + /* + * Set it_need to tell the thread to keep running if it is already + * running. Then, lock the thread and see if we actually need to + * put it on the runqueue. + */ + it->it_need = 1; +#ifdef notyet + /* XXX */ + thread_lock(td); + if (TD_AWAITING_INTR(td)) { + CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, + td->td_name); + TD_CLR_IWAIT(td); + sched_add(td, SRQ_INTR); + } else { + CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", + __func__, p->p_pid, td->td_name, it->it_need, td->td_state); + } + thread_unlock(td); +#endif + return (0); +} +#else +int +intr_event_remove_handler(void *cookie) +{ + struct intr_handler *handler = (struct intr_handler *)cookie; + struct intr_event *ie; + struct intr_thread *it; +#ifdef INVARIANTS + struct intr_handler *ih; +#endif +#ifdef notyet + int dead; +#endif + + if (handler == NULL) + return (EINVAL); + ie = handler->ih_event; + KASSERT(ie != NULL, + ("interrupt handler \"%s\" has a NULL interrupt event", + handler->ih_name)); + mtx_lock(&ie->ie_lock); + CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, + ie->ie_name); +#ifdef INVARIANTS + TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) + if (ih == handler) + goto ok; + mtx_unlock(&ie->ie_lock); + panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", + ih->ih_name, ie->ie_name); +ok: +#endif + /* + * If there are no ithreads (per event and per handler), then + * just remove the handler and return. + * XXX: Note that an INTR_FAST handler might be running on another CPU! + */ + if (ie->ie_thread == NULL && handler->ih_thread == NULL) { + TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); + mtx_unlock(&ie->ie_lock); + free(handler, M_ITHREAD); + return (0); + } + + /* Private or global ithread? */ + it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; + /* + * If the interrupt thread is already running, then just mark this + * handler as being dead and let the ithread do the actual removal. + * + * During a cold boot while cold is set, msleep() does not sleep, + * so we have to remove the handler here rather than letting the + * thread do it. + */ + thread_lock(it->it_thread); + if (!TD_AWAITING_INTR(it->it_thread) && !cold) { + handler->ih_flags |= IH_DEAD; + + /* + * Ensure that the thread will process the handler list + * again and remove this handler if it has already passed + * it on the list. + */ + it->it_need = 1; + } else + TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); + thread_unlock(it->it_thread); + while (handler->ih_flags & IH_DEAD) + msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); + /* + * At this point, the handler has been disconnected from the event, + * so we can kill the private ithread if any. + */ + if (handler->ih_thread) { + ithread_destroy(handler->ih_thread); + handler->ih_thread = NULL; + } + intr_event_update(ie); +#ifdef notyet + /* + * XXX: This could be bad in the case of ppbus(8). Also, I think + * this could lead to races of stale data when servicing an + * interrupt. 
+ */ + dead = 1; + TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { + if (handler != NULL) { + dead = 0; + break; + } + } + if (dead) { + ithread_destroy(ie->ie_thread); + ie->ie_thread = NULL; + } +#endif + mtx_unlock(&ie->ie_lock); + free(handler, M_ITHREAD); + return (0); +} + +static int +intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) +{ + struct intr_entropy entropy; + struct thread *td; + struct thread *ctd; + struct proc *p; + + /* + * If no ithread or no handlers, then we have a stray interrupt. + */ + if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) + return (EINVAL); + + ctd = curthread; + td = it->it_thread; + p = td->td_proc; + + /* + * If any of the handlers for this ithread claim to be good + * sources of entropy, then gather some. + */ + if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { + CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, + p->p_pid, td->td_name); + entropy.event = (uintptr_t)ie; + entropy.td = ctd; + random_harvest(&entropy, sizeof(entropy), 2, 0, + RANDOM_INTERRUPT); + } + + KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); + + /* + * Set it_need to tell the thread to keep running if it is already + * running. Then, lock the thread and see if we actually need to + * put it on the runqueue. + */ + it->it_need = 1; + thread_lock(td); + if (TD_AWAITING_INTR(td)) { + CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, + td->td_name); + TD_CLR_IWAIT(td); + sched_add(td, SRQ_INTR); + } else { + CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", + __func__, p->p_pid, td->td_name, it->it_need, td->td_state); + } + thread_unlock(td); + + return (0); } +#endif +/* + * Allow interrupt event binding for software interrupt handlers -- a no-op, + * since interrupts are generated in software rather than being directed by + * a PIC. + */ +static int +swi_assign_cpu(void *arg, u_char cpu) +{ + + return (0); +} /* * Add a software interrupt handler to a specified event. If a given event @@ -55,7 +901,36 @@ int swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, void *arg, int pri, enum intr_type flags, void **cookiep) { - panic(""); + struct intr_event *ie; + int error; + + if (flags & INTR_ENTROPY) + return (EINVAL); + + ie = (eventp != NULL) ? *eventp : NULL; + + if (ie != NULL) { + if (!(ie->ie_flags & IE_SOFT)) + return (EINVAL); + } else { + error = intr_event_create(&ie, NULL, IE_SOFT, 0, + NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); + if (error) + return (error); + if (eventp != NULL) + *eventp = ie; + } + error = intr_event_add_handler(ie, name, NULL, handler, arg, + (pri * RQ_PPQ) + PI_SOFT, flags, cookiep); + if (error) + return (error); + if (pri == SWI_CLOCK) { + struct proc *p; + p = ie->ie_thread->it_thread->td_proc; + PROC_LOCK(p); + p->p_flag |= P_NOLOAD; + PROC_UNLOCK(p); + } return (0); } @@ -65,6 +940,527 @@ swi_add(struct intr_event **eventp, cons void swi_sched(void *cookie, int flags) { + struct intr_handler *ih = (struct intr_handler *)cookie; + struct intr_event *ie = ih->ih_event; + int error; + + CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, + ih->ih_need); + + /* + * Set ih_need for this handler so that if the ithread is already + * running it will execute this handler on the next pass. Otherwise, + * it will execute it the next time it runs. 
+ */ + atomic_store_rel_int(&ih->ih_need, 1); + + if (!(flags & SWI_DELAY)) { + PCPU_INC(cnt.v_soft); +#ifdef INTR_FILTER + error = intr_event_schedule_thread(ie, ie->ie_thread); +#else + error = intr_event_schedule_thread(ie); +#endif + KASSERT(error == 0, ("stray software interrupt")); + } +} + +/* + * Remove a software interrupt handler. Currently this code does not + * remove the associated interrupt event if it becomes empty. Calling code + * may do so manually via intr_event_destroy(), but that's not really + * an optimal interface. + */ +int +swi_remove(void *cookie) +{ + + return (intr_event_remove_handler(cookie)); +} + +#ifdef INTR_FILTER +static void +priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) +{ + struct intr_event *ie; + + ie = ih->ih_event; + /* + * If this handler is marked for death, remove it from + * the list of handlers and wake up the sleeper. + */ + if (ih->ih_flags & IH_DEAD) { + mtx_lock(&ie->ie_lock); + TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); + ih->ih_flags &= ~IH_DEAD; + wakeup(ih); + mtx_unlock(&ie->ie_lock); + return; + } + + /* Execute this handler. */ + CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", + __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, + ih->ih_name, ih->ih_flags); + + if (!(ih->ih_flags & IH_MPSAFE)) + mtx_lock(&Giant); + ih->ih_handler(ih->ih_argument); + if (!(ih->ih_flags & IH_MPSAFE)) + mtx_unlock(&Giant); +} +#endif + +/* + * This is a public function for use by drivers that mux interrupt + * handlers for child devices from their interrupt handler. + */ +void +intr_event_execute_handlers(struct proc *p, struct intr_event *ie) +{ + struct intr_handler *ih, *ihn; + + TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { + /* + * If this handler is marked for death, remove it from + * the list of handlers and wake up the sleeper. *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***