Date: Fri, 3 Oct 2003 00:27:56 -0400 (EDT)
From: Jun Su <junsu@m-net.arbornet.org>
To: FreeBSD-gnats-submit@FreeBSD.org
Subject: kern/57522: [PATCH] New PID allocator algorithm from NetBSD
Message-ID: <200310030427.h934RuOA034289@m-net.arbornet.org>
Resent-Message-ID: <200310030430.h934U9jo020219@freefall.freebsd.org>
>Number: 57522
>Category: kern
>Synopsis: [PATCH] New PID allocator algorithm from NetBSD
>Confidential: no
>Severity: non-critical
>Priority: medium
>Responsible: freebsd-bugs
>State: open
>Quarter:
>Keywords:
>Date-Required:
>Class: change-request
>Submitter-Id: current-users
>Arrival-Date: Thu Oct 02 21:30:09 PDT 2003
>Closed-Date:
>Last-Modified:
>Originator: Jun Su
>Release: FreeBSD Current
>Organization:
>Environment:
System: FreeBSD 5.1 -Current
>Description:
I ported the algorithm from NetBSD. The original idea is from David Laight.
This alternative pid/proc allocator removes all searches associated with pid
lookup and allocation, as well as any dependency on NPROC or MAXUSERS.
NO_PID is changed to -1 (and renamed NO_PGID) to remove the artificial limit
on PID_MAX.
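
For readers unfamiliar with the NetBSD scheme, here is a minimal userland
sketch of the slot encoding the patch relies on: a table slot holds either a
real proc pointer (low bit clear) or a free-list link plus a saved "use count"
(low bit set), and a fresh pid is reconstructed as use count + table size +
slot index. Everything below (fake_proc, TBL_SIZE, the sample values) is
invented for illustration and is not part of the patch.

/*
 * Illustrative sketch only -- not kernel code.  The macros mirror
 * P_VALID/P_NEXT/P_FREE from the patch; fake_proc and TBL_SIZE are
 * made-up names.
 */
#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE 32                     /* table size, a power of two */
#define TBL_MASK (TBL_SIZE - 1)

struct fake_proc { int pid; };

#define P_VALID(p)      (!((intptr_t)(p) & 1))
#define P_NEXT(p)       ((intptr_t)(p) >> 1)
#define P_FREE(v)       ((struct fake_proc *)(((intptr_t)(v) << 1) | 1))

static struct fake_proc *table[TBL_SIZE];

int
main(void)
{
        /* Pretend slot 5 is free, links to slot 9, saved use count 2*TBL_SIZE. */
        table[5] = P_FREE((2 * TBL_SIZE) | 9);

        if (!P_VALID(table[5])) {
                intptr_t nxt = P_NEXT(table[5]);
                /* pid handed out from slot 5: saved use count + size + index. */
                int pid = (int)(nxt & ~TBL_MASK) + TBL_SIZE + 5;

                printf("slot 5: next free slot %ld, pid to hand out %d\n",
                    (long)(nxt & TBL_MASK), pid);
        }
        return (0);
}
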
>How-To-Repeat:
None
>Fix:
diff -ru2 /usr/src/sys/kern/init_main.c kern/init_main.c
--- /usr/src/sys/kern/init_main.c Thu Oct 2 07:51:53 2003
+++ kern/init_main.c Thu Oct 2 08:14:39 2003
@@ -88,5 +88,5 @@
/* Components of the first process -- never freed. */
static struct session session0;
-static struct pgrp pgrp0;
+struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
@@ -347,8 +347,6 @@
*/
LIST_INSERT_HEAD(&allproc, p, p_list);
- LIST_INSERT_HEAD(PIDHASH(0), p, p_hash);
mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
p->p_pgrp = &pgrp0;
- LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
LIST_INIT(&pgrp0.pg_members);
LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);
diff -ru2 /usr/src/sys/kern/kern_exit.c kern/kern_exit.c
--- /usr/src/sys/kern/kern_exit.c Sun Jun 15 08:31:23 2003
+++ kern/kern_exit.c Wed Oct 1 16:30:06 2003
@@ -398,5 +398,4 @@
LIST_REMOVE(p, p_list);
LIST_INSERT_HEAD(&zombproc, p, p_list);
- LIST_REMOVE(p, p_hash);
sx_xunlock(&allproc_lock);
@@ -697,5 +696,5 @@
KASSERT(FIRST_THREAD_IN_PROC(p),
("wait1: no residual thread!"));
- uma_zfree(proc_zone, p);
+ proc_free(p);
sx_xlock(&allproc_lock);
nprocs--;
diff -ru2 /usr/src/sys/kern/kern_fork.c kern/kern_fork.c
--- /usr/src/sys/kern/kern_fork.c Thu Aug 21 22:32:51 2003
+++ kern/kern_fork.c Thu Oct 2 17:07:13 2003
@@ -200,6 +200,6 @@
uid_t uid;
struct proc *newproc;
- int ok, trypid;
- static int curfail, pidchecked = 0;
+ int ok;
+ static int curfail;
static struct timeval lastfail;
struct filedesc *fd;
@@ -285,5 +285,5 @@
/* Allocate new proc. */
- newproc = uma_zalloc(proc_zone, M_WAITOK);
+ newproc = proc_alloc(flags);
#ifdef MAC
mac_init_proc(newproc);
@@ -323,84 +323,13 @@
nprocs++;
- /*
- * Find an unused process ID. We remember a range of unused IDs
- * ready to use (from lastpid+1 through pidchecked-1).
- *
- * If RFHIGHPID is set (used during system boot), do not allocate
- * low-numbered pids.
- */
- trypid = lastpid + 1;
- if (flags & RFHIGHPID) {
- if (trypid < 10)
- trypid = 10;
- } else {
- if (randompid)
- trypid += arc4random() % randompid;
- }
-retry:
- /*
- * If the process ID prototype has wrapped around,
- * restart somewhat above 0, as the low-numbered procs
- * tend to include daemons that don't exit.
- */
- if (trypid >= PID_MAX) {
- trypid = trypid % PID_MAX;
- if (trypid < 100)
- trypid += 100;
- pidchecked = 0;
- }
- if (trypid >= pidchecked) {
- int doingzomb = 0;
-
- pidchecked = PID_MAX;
- /*
- * Scan the active and zombie procs to check whether this pid
- * is in use. Remember the lowest pid that's greater
- * than trypid, so we can avoid checking for a while.
- */
- p2 = LIST_FIRST(&allproc);
-again:
- for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
- PROC_LOCK(p2);
- while (p2->p_pid == trypid ||
- p2->p_pgrp->pg_id == trypid ||
- p2->p_session->s_sid == trypid) {
- trypid++;
- if (trypid >= pidchecked) {
- PROC_UNLOCK(p2);
- goto retry;
- }
- }
- if (p2->p_pid > trypid && pidchecked > p2->p_pid)
- pidchecked = p2->p_pid;
- if (p2->p_pgrp->pg_id > trypid &&
- pidchecked > p2->p_pgrp->pg_id)
- pidchecked = p2->p_pgrp->pg_id;
- if (p2->p_session->s_sid > trypid &&
- pidchecked > p2->p_session->s_sid)
- pidchecked = p2->p_session->s_sid;
- PROC_UNLOCK(p2);
- }
- if (!doingzomb) {
- doingzomb = 1;
- p2 = LIST_FIRST(&zombproc);
- goto again;
- }
- }
-
- /*
- * RFHIGHPID does not mess with the lastpid counter during boot.
- */
- if (flags & RFHIGHPID)
- pidchecked = 0;
- else
- lastpid = trypid;
-
p2 = newproc;
- p2->p_state = PRS_NEW; /* protect against others */
- p2->p_pid = trypid;
LIST_INSERT_HEAD(&allproc, p2, p_list);
- LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
sx_xunlock(&allproc_lock);
+
+ /*
+ * RFHIGHPID does not mess with the lastpid counter during boot.
+ */
+ if (!(flags & RFHIGHPID))
+ lastpid = p2->p_pid;
/*
diff -ru2 /usr/src/sys/kern/kern_proc.c kern/kern_proc.c
--- /usr/src/sys/kern/kern_proc.c Fri Sep 19 18:49:27 2003
+++ kern/kern_proc.c Thu Oct 2 08:54:48 2003
@@ -42,4 +42,5 @@
#include <sys/param.h>
+#include <sys/unistd.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -84,11 +85,41 @@
static void proc_fini(void *mem, int size);
+/* pid to proc lookup is done by indexing the pid_table array.
+ Since pid numbers are only allocated when an empty slot
+ has been found, there is no need to search any lists ever.
+ (an orphaned pgrp will lock the slot, a session will lock
+ the pgrp with the same number.)
+ If the table is too small it is reallocated with twice the
+ previous size and the entries 'unzipped' into the two halves.
+ A linked list of free entries is passed through the pt_proc
+ field of 'free' items - set odd to be an invalid ptr. */
+
+struct pid_table {
+ struct proc* pt_proc;
+ struct pgrp* pt_pgrp;
+ };
+#if 1/* strongly typed cast - should be a noop */
+static __inline intptr_t p2u(struct proc *p) { return (intptr_t)p; };
+#else
+#define p2u(p) ((intptr_t)p)
+#endif
+#define P_VALID(p) (!(p2u(p) & 1))
+#define P_NEXT(p) (p2u(p) >> 1)
+#define P_FREE(pid) ((struct proc *)((pid) << 1 | 1))
+
+static struct pid_table *pid_table;
+#define INITIAL_PID_TABLE_SIZE (1 << 5)
+
+static uint pid_tbl_mask = (INITIAL_PID_TABLE_SIZE) - 1;/* table size 2^n */
+static uint pid_alloc_lim;/* max we allocate before growing table */
+static uint pid_alloc_cnt = 0;
+/* links through free slots - never empty! */
+static uint next_free_pt, last_free_pt;
+static pid_t pid_max = PID_MAX; /* largest value we allocate */
+
+
/*
* Other process lists
*/
-struct pidhashhead *pidhashtbl;
-u_long pidhash;
-struct pgrphashhead *pgrphashtbl;
-u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
@@ -110,18 +141,38 @@
/*
- * Initialize global process hashing structures.
+ * Initialize global process mapping structures.
*/
void
procinit()
{
-
+ int i;
sx_init(&allproc_lock, "allproc");
sx_init(&proctree_lock, "proctree");
mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
+
+ MALLOC(pid_table, struct pid_table *,
+ INITIAL_PID_TABLE_SIZE * sizeof *pid_table, M_PROC, M_WAITOK);
+
+#define LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1))
+ /* Set free list running through table...
+ Preset 'use count' to -1 so we allocate pid 1 next. */
+ for (i = 0; i <= pid_tbl_mask; i++) {
+ pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1);
+ pid_table[i].pt_pgrp = 0;
+ }
+ /* slot 0 is just grabbed */
+ next_free_pt = 1;
+ pid_table[0].pt_proc = &proc0;
+ pid_table[0].pt_pgrp = &pgrp0;
+ /* Need to fix up the last entry. */
+ last_free_pt = pid_tbl_mask;
+ pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY);
+ /* point at which we grow table - to avoid reusing pids too often */
+ pid_alloc_lim = pid_tbl_mask - 1;
+#undef LINK_EMPTY
+
LIST_INIT(&allproc);
LIST_INIT(&zombproc);
- pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
- pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
proc_ctor, proc_dtor, proc_init, proc_fini,
@@ -130,4 +181,162 @@
}
+static void
+expand_pid_table(void)
+{
+ uint pt_size = pid_tbl_mask + 1;
+ struct pid_table *n_pt, *new_pt;
+ struct proc *proc;
+ struct pgrp *pgrp;
+ int i;
+ pid_t pid;
+
+ new_pt = malloc(pt_size * 2 * sizeof *new_pt, M_PROC, M_WAITOK);
+
+ sx_xlock(&allproc_lock);
+ if (pt_size != pid_tbl_mask + 1) {
+ /* Another process beat us to it... */
+ sx_xunlock(&allproc_lock);
+ FREE(new_pt, M_PROC);
+ return;
+ }
+
+ /*
+ * Copy entries from old table into new one.
+ * If 'pid' is 'odd' we need to place in the upper half,
+ * even pids to the lower half.
+ * Free items stay in the low half so we don't have to
+ * fixup the reference to them.
+ * We stuff free items on the front of the freelist
+ * because we can't write to unmodified entries.
+ * Processing the table backwards maintains a semblance
+ * of issuing pid numbers that increase with time.
+ */
+ i = pt_size - 1;
+ n_pt = new_pt + i;
+ for (; ; i--, n_pt--) {
+ proc = pid_table[i].pt_proc;
+ pgrp = pid_table[i].pt_pgrp;
+ if (!P_VALID(proc)) {
+ /* Up 'use count' so that link is valid */
+ pid = (P_NEXT(proc) + pt_size) & ~pt_size;
+ proc = P_FREE(pid);
+ if (pgrp)
+ pid = pgrp->pg_id;
+ } else
+ pid = proc->p_pid;
+
+ /* Save entry in appropriate half of table */
+ n_pt[pid & pt_size].pt_proc = proc;
+ n_pt[pid & pt_size].pt_pgrp = pgrp;
+
+ /* Put other piece on start of free list */
+ pid = (pid ^ pt_size) & ~pid_tbl_mask;
+ n_pt[pid & pt_size].pt_proc =
+ P_FREE((pid & ~pt_size) | next_free_pt);
+ n_pt[pid & pt_size].pt_pgrp = 0;
+ next_free_pt = i | (pid & pt_size);
+ if (i == 0)
+ break;
+ }
+
+ /* Switch tables */
+ n_pt = pid_table;
+ pid_table = new_pt;
+ pid_tbl_mask = pt_size * 2 - 1;
+
+ /*
+ * pid_max starts as PID_MAX (= 30000), once we have 16384
+ * allocated pids we need it to be larger!
+ */
+ if (pid_tbl_mask > PID_MAX) {
+ pid_max = pid_tbl_mask * 2 + 1;
+ pid_alloc_lim |= pid_alloc_lim << 1;
+ } else
+ pid_alloc_lim <<= 1; /* doubles number of free slots... */
+
+ sx_xunlock(&allproc_lock);
+ FREE(n_pt, M_PROC);
+}
+
+
+/*
+ * Allocate a free proc structure.  This is
+ * called from fork.
+ * Expand the pid mapping table if needed.
+ */
+struct proc *
+proc_alloc(int flags)
+{
+ struct proc *p;
+ int nxt;
+ pid_t pid;
+ struct pid_table *pt;
+
+ p = uma_zalloc(proc_zone, M_WAITOK);
+ p->p_state = PRS_NEW; /* protect against others */
+
+ /* allocate next free pid */
+
+ for (;;expand_pid_table()) {
+ if (pid_alloc_cnt >= pid_alloc_lim)
+ /* ensure pids cycle through 2000+ values */
+ continue;
+ sx_xlock(&allproc_lock);
+ pt = &pid_table[next_free_pt];
+ nxt = P_NEXT(pt->pt_proc);
+ if (nxt & pid_tbl_mask)
+ break;
+ /* Table full - expand (NB last entry not used....) */
+ sx_xunlock(&allproc_lock);
+ }
+ /* pid is 'saved use count' + 'size' + entry */
+ pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + next_free_pt;
+ if ((uint)pid > (uint)pid_max)
+ pid &= pid_tbl_mask;
+
+ if ((flags & RFHIGHPID) && pid < 10)
+ pid += pid_tbl_mask + 1;
+
+ p->p_pid = pid;
+ next_free_pt = nxt & pid_tbl_mask;
+
+ /* Grab table slot */
+ pt->pt_proc = p;
+ pid_alloc_cnt++;
+
+ sx_xunlock(&allproc_lock);
+
+ return p;
+
+}
+
+/*
+ * Free last resources of a process - called from proc_free (in kern_exit.c)
+ */
+void
+proc_free(struct proc *p)
+{
+ pid_t pid = p->p_pid;
+ struct pid_table *pt;
+
+ sx_xlock(&allproc_lock);
+
+ pt = &pid_table[pid & pid_tbl_mask];
+ /* save pid use count in slot */
+ pt->pt_proc = P_FREE(pid & ~pid_tbl_mask);
+
+ if (pt->pt_pgrp == NULL) {
+ /* link last freed entry onto ours */
+ pid &= pid_tbl_mask;
+ pt = &pid_table[last_free_pt];
+ pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid);
+ last_free_pt = pid;
+ pid_alloc_cnt--;
+ }
+
+ nprocs--;
+ sx_xunlock(&allproc_lock);
+ uma_zfree(proc_zone, p);
+}
/*
* Prepare a proc for use.
@@ -254,9 +463,11 @@
sx_slock(&allproc_lock);
- LIST_FOREACH(p, PIDHASH(pid), p_hash)
- if (p->p_pid == pid) {
- PROC_LOCK(p);
- break;
- }
+ p = pid_table[pid & pid_tbl_mask].pt_proc;
+ /* Only allow live processes to be found by pid. */
+ if (!P_VALID(p) || p->p_pid != pid)
+ p = 0;
+ else
+ PROC_LOCK(p);
+ /* XXX MP - need to have a reference count... */
sx_sunlock(&allproc_lock);
return (p);
@@ -273,13 +484,19 @@
register struct pgrp *pgrp;
- sx_assert(&proctree_lock, SX_LOCKED);
+ sx_slock(&allproc_lock);
+ pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp;
- LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
- if (pgrp->pg_id == pgid) {
- PGRP_LOCK(pgrp);
- return (pgrp);
- }
- }
- return (NULL);
+ /*
+ * Can't look up a pgrp that only exists because the session
+ * hasn't died yet (traditional)
+ */
+ if (pgrp == NULL || pgrp->pg_id != pgid
+ || LIST_EMPTY(&pgrp->pg_members))
+ pgrp = NULL;
+ else
+ PGRP_LOCK(pgrp);
+ /* XXX MP - need to have a reference count... */
+ sx_sunlock(&allproc_lock);
+ return pgrp;
}
@@ -346,5 +563,5 @@
* this should not deadlock.
*/
- LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
+ pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp;
pgrp->pg_jobc = 0;
SLIST_INIT(&pgrp->pg_sigiolst);
@@ -445,4 +662,29 @@
/*
+ * Remove the pgrp from the pid table.
+ */
+static void
+pgunlink(pid_t pg_id)
+{
+ struct pgrp *pgrp;
+ struct pid_table *pt;
+
+ sx_assert(&proctree_lock, SX_XLOCKED);
+ pt = &pid_table[pg_id & pid_tbl_mask];
+ pgrp = pt->pt_pgrp;
+ pt->pt_pgrp = 0;
+
+ if (!P_VALID(pt->pt_proc)) {
+ /* orphaned pgrp, put slot onto free list */
+ pg_id &= pid_tbl_mask;
+ pt = &pid_table[last_free_pt];
+ pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id);
+ last_free_pt = pg_id;
+ pid_alloc_cnt--;
+ }
+
+}
+
+/*
* delete a process group
*/
@@ -467,5 +709,5 @@
pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
pgrp->pg_session->s_ttyp->t_pgrp = NULL;
- LIST_REMOVE(pgrp, pg_hash);
+
savesess = pgrp->pg_session;
SESS_LOCK(savesess);
@@ -473,10 +715,17 @@
SESS_UNLOCK(savesess);
PGRP_UNLOCK(pgrp);
+
if (savesess->s_count == 0) {
mtx_destroy(&savesess->s_mtx);
FREE(pgrp->pg_session, M_SESSION);
+ pgunlink(pgrp->pg_id);
+ }
+ else {
+ if (savesess->s_sid != pgrp->pg_id)
+ pgunlink(pgrp->pg_id);
}
- mtx_destroy(&pgrp->pg_mtx);
- FREE(pgrp, M_PGRP);
+
+ mtx_destroy(&pgrp->pg_mtx);
+ FREE(pgrp, M_PGRP);
}
@@ -498,4 +747,22 @@
}
+/*
+ * Delete session - called from SESSRELE when s_count becomes zero.
+ */
+void
+sessdelete(struct session *ss)
+{
+ /*
+ * We keep the pgrp with the same id as the session in
+ * order to stop a process from being given the same pid.
+ * Since the pgrp holds a reference to the session, it
+ * must be a 'zombie' pgrp by now.
+ */
+
+ pgunlink(ss->s_sid);
+
+ FREE(ss, M_SESSION);
+}
+
/*
* Adjust pgrp jobc counters when specified process changes process group.
@@ -583,30 +850,43 @@
#ifdef DDB
#include <ddb/ddb.h>
-
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
- register struct pgrp *pgrp;
- register struct proc *p;
- register int i;
-
- for (i = 0; i <= pgrphash; i++) {
- if (!LIST_EMPTY(&pgrphashtbl[i])) {
- printf("\tindx %d\n", i);
- LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
- printf(
- "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
- (void *)pgrp, (long)pgrp->pg_id,
- (void *)pgrp->pg_session,
- pgrp->pg_session->s_count,
- (void *)LIST_FIRST(&pgrp->pg_members));
- LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
- printf("\t\tpid %ld addr %p pgrp %p\n",
- (long)p->p_pid, (void *)p,
- (void *)p->p_pgrp);
- }
- }
- }
- }
+ struct pid_table *pt;
+ struct proc *p;
+ struct pgrp *pgrp;
+ int id;
+
+ printf("pid table %p size %x, next %x, last %x\n",
+ pid_table, pid_tbl_mask+1,
+ next_free_pt, last_free_pt);
+ for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) {
+ p = pt->pt_proc;
+ if (!P_VALID(p) && !pt->pt_pgrp)
+ continue;
+ db_printf(" id %x: ", id);
+ if (P_VALID(p))
+ db_printf("proc %p id %d (0x%x) %s\n",
+ p, p->p_pid, p->p_pid, p->p_comm);
+ else
+ db_printf("next %x use %x\n",
+ P_NEXT(p) & pid_tbl_mask,
+ P_NEXT(p) & ~pid_tbl_mask);
+ if ((pgrp = pt->pt_pgrp)) {
+ db_printf("\tsession %p, sid %d, count %d, login %s\n",
+ pgrp->pg_session, pgrp->pg_session->s_sid,
+ pgrp->pg_session->s_count,
+ pgrp->pg_session->s_login);
+ db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n",
+ pgrp, pgrp->pg_id, pgrp->pg_jobc,
+ pgrp->pg_members.lh_first);
+ for (p = pgrp->pg_members.lh_first; p != 0;
+ p = p->p_pglist.le_next) {
+ db_printf("\t\tpid %d addr %p pgrp %p %s\n",
+ p->p_pid, p, p->p_pgrp, p->p_comm);
+ }
+ }
+ }
}
+
#endif /* DDB */
void
diff -ru2 /usr/src/sys/sys/proc.h sys/proc.h
--- /usr/src/sys/sys/proc.h Wed Oct 1 09:48:57 2003
+++ sys/proc.h Thu Oct 2 08:49:42 2003
@@ -94,5 +94,4 @@
*/
struct pgrp {
- LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */
LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */
struct session *pg_session; /* (c) Pointer to session. */
@@ -712,5 +711,5 @@
#define SESSRELE(s) { \
if (--(s)->s_count == 0) \
- FREE(s, M_SESSION); \
+ sessdelete(s); \
}
@@ -788,16 +787,9 @@
#define PARGS_UNLOCK(p) mtx_unlock(&pargs_ref_lock)
-#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash])
-extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
-extern u_long pidhash;
-
-#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash])
-extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
-extern u_long pgrphash;
-
extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx pargs_ref_lock;
extern struct mtx ppeers_lock;
+extern struct pgrp pgrp0; /* Process group for swapper. */
extern struct proc proc0; /* Process slot for swapper. */
extern struct thread thread0; /* Primary thread in proc0 */
@@ -842,4 +834,7 @@
int inferior(struct proc *p);
int leavepgrp(struct proc *p);
+void sessdelete(struct session *);
+struct proc *proc_alloc(int flags);
+void proc_free(struct proc *p);
void mi_switch(void);
int p_candebug(struct thread *td, struct proc *p);
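
As a side note to the expand_pid_table() hunk above, the following standalone
sketch shows the "unzip" indexing it performs when the table doubles: an entry
whose pid has the old-size bit set moves to the upper half of the new table,
so that (pid & new_mask) still lands on the slot that owns it. The table size
and sample pids below are invented purely for illustration.

/*
 * Illustrative sketch only -- demonstrates the slot arithmetic used when
 * the pid table doubles; the sizes and pids are made up.
 */
#include <stdio.h>

int
main(void)
{
        unsigned old_size = 32;                 /* old table size (power of two) */
        unsigned new_mask = 2 * old_size - 1;   /* mask after doubling */
        unsigned pids[] = { 33, 70, 101 };
        unsigned i;

        for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
                unsigned old_slot = pids[i] & (old_size - 1);
                /* Entries whose pid has the old-size bit set go to the upper half. */
                unsigned new_slot = old_slot | (pids[i] & old_size);

                printf("pid %u: old slot %u -> new slot %u (pid & new_mask = %u)\n",
                    pids[i], old_slot, new_slot, pids[i] & new_mask);
        }
        return (0);
}
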
>Release-Note:
>Audit-Trail:
>Unformatted:
