Date: Tue, 20 Jun 2006 22:16:34 -0700
From: "Kip Macy" <kip.macy@gmail.com>
To: "Andrew R. Reiter" <arr@watson.org>
Cc: Perforce Change Reviews <perforce@freebsd.org>, Kip Macy <kmacy@freebsd.org>
Subject: Re: PERFORCE change 99646 for review
Message-ID: <b1fa29170606202216t7d959627r54c17a87cf473b33@mail.gmail.com>
In-Reply-To: <20060619193506.F40529@fledge.watson.org>
References: <200606192330.k5JNUMVL029897@repoman.freebsd.org> <20060619193506.F40529@fledge.watson.org>
Actually, rwlocks would probably be best, but they are sx locks right now
to make WITNESS happy. Any speedup won't come from the lock being shared
(the old allproc lock was already acquired shared); it will come from
making the process lists per-CPU, so that fork, exit, and the functions
that scan the process lists are no longer serialized across all CPUs.
(A minimal sketch of the resulting scan pattern follows the quoted diff
below.)

    -Kip

On 6/19/06, Andrew R. Reiter <arr@watson.org> wrote:
> On Mon, 19 Jun 2006, Kip Macy wrote:
>
> :http://perforce.freebsd.org/chv.cgi?CH=99646
> :
> :Change 99646 by kmacy@kmacy_storage:sun4v_work_sleepq on 2006/06/19 23:29:26
> :
> : convert pcpu allproc locks to sx to avoid having to fix cases where
> : sx locks are being acquired afterwards
> : add 2 missed unlocks
>
> Nice. Good thought. I have seen the recent "benchmark" SMP emails and am
> not aggro like DT, so I don't care about too much detail; however, do you
> believe the switch from straight mutex locks to shared/exclusive locks
> will assist in some performance areas?
>
> Thanks for your work,
> Andrew
>
> :
> :Affected files ...
> :
> :.. //depot/projects/kmacy_sun4v/src/sys/fs/pseudofs/pseudofs_vnops.c#4 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/imgact_elf.c#4 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/init_main.c#7 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_descrip.c#5 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_exit.c#6 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_fork.c#6 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_ktrace.c#5 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_proc.c#5 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_resource.c#6 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_sig.c#9 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/sched_4bsd.c#9 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/subr_pcpu.c#4 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/subr_witness.c#7 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/kern/sys_process.c#6 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcpu.h#15 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/sys/pcpu.h#4 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/sys/proc.h#7 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/vm/vm_glue.c#7 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/vm/vm_meter.c#5 edit
> :.. //depot/projects/kmacy_sun4v/src/sys/vm/vm_pageout.c#4 edit
> :
> :Differences ...
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/fs/pseudofs/pseudofs_vnops.c#4 (text+ko) ====
> :
> :@@ -568,18 +568,18 @@
> : /* next process */
> : if (*p == NULL) {
> : *p = LIST_FIRST(&(*pc)->pc_allproc);
> :- PCPU_PROC_LOCK(*pc);
> :+ PCPU_PROC_RLOCK(*pc);
> : } else if ((LIST_NEXT(*p, p_list) == NULL) && (SLIST_NEXT(*pc, pc_allcpu) != NULL)) {
> :- PCPU_PROC_UNLOCK(*pc);
> :+ PCPU_PROC_RUNLOCK(*pc);
> : *pc = SLIST_NEXT(*pc, pc_allcpu);
> :- PCPU_PROC_LOCK(*pc);
> :+ PCPU_PROC_RLOCK(*pc);
> : *p = LIST_FIRST(&(*pc)->pc_allproc);
> : } else {
> : *p = LIST_NEXT(*p, p_list);
> : }
> : /* out of processes: next node */
> : if (*p == NULL) {
> :- PCPU_PROC_UNLOCK(*pc);
> :+ PCPU_PROC_RUNLOCK(*pc);
> : *pn = (*pn)->pn_next;
> : }
> : }
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/imgact_elf.c#4 (text+ko) ====
> :
> :@@ -147,14 +147,14 @@
> :
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> :-
> : if (p->p_sysent == entry->sysvec) {
> : rval = TRUE;
> : break;
> : }
> : }
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> :
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/init_main.c#7 (text+ko) ====
> :
> :@@ -539,12 +539,12 @@
> :
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : microuptime(&p->p_stats->p_start);
> : p->p_rux.rux_runtime = 0;
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : PCPU_SET(switchtime, cpu_ticks());
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_descrip.c#5 (text+ko) ====
> :
> :@@ -2357,7 +2357,7 @@
> :
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : fdp = fdhold(p);
> : if (fdp == NULL)
> :@@ -2379,7 +2379,7 @@
> : while (nrele--)
> : vrele(olddp);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : if (rootvnode == olddp) {
> :@@ -2459,7 +2459,7 @@
> : xf.xf_size = sizeof(xf);
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : if (p->p_state == PRS_NEW)
> : continue;
> :@@ -2496,7 +2496,7 @@
> : if (error)
> : break;
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : return (error);
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_exit.c#6 (text+ko) ====
> :
> :@@ -403,15 +403,10 @@
> : * Remove proc from allproc queue and pidhash chain.
> : * Place onto zombproc. Unlink from parent's child list.
> : */
> :- if (p->p_pcpu == NULL)
> :- panic("process: %d has null pcpu pointer", p->p_pid);
> :- if (!mtx_initialized(&(p->p_pcpu->pc_allproc_lock)))
> :- panic("null allproc lock on %d", p->p_pcpu->pc_cpuid);
> :-
> :- PCPU_PROC_LOCK(p->p_pcpu);
> :+ PCPU_PROC_WLOCK(p->p_pcpu);
> : LIST_REMOVE(p, p_list);
> : LIST_INSERT_HEAD(&p->p_pcpu->pc_zombproc, p, p_list);
> :- PCPU_PROC_UNLOCK(p->p_pcpu);
> :+ PCPU_PROC_WUNLOCK(p->p_pcpu);
> :
> : mtx_lock(&pidhash_lock);
> : LIST_REMOVE(p, p_hash);
> :@@ -770,9 +765,9 @@
> : * we have an exclusive reference.
> : */
> :
> :- PCPU_PROC_LOCK(p->p_pcpu);
> :+ PCPU_PROC_WLOCK(p->p_pcpu);
> : LIST_REMOVE(p, p_list); /* off zombproc */
> :- PCPU_PROC_UNLOCK(p->p_pcpu);
> :+ PCPU_PROC_WUNLOCK(p->p_pcpu);
> :
> : mtx_lock(&pidhash_lock);
> : LIST_REMOVE(p, p_hash); /* off zombproc */
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_fork.c#6 (text+ko) ====
> :
> :@@ -371,7 +371,7 @@
> : * than trypid, so we can avoid checking for a while.
> : */
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> :
> : p2 = LIST_FIRST(&pc->pc_allproc);
> : again:
> :@@ -385,7 +385,7 @@
> : trypid++;
> : if (trypid >= pidchecked) {
> : PROC_UNLOCK(p2);
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : goto retry;
> : }
> : }
> :@@ -409,7 +409,7 @@
> : p2 = LIST_FIRST(&pc->pc_zombproc);
> : goto again;
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : }
> : mtx_unlock(&pidalloc_lock);
> :@@ -429,9 +429,9 @@
> : AUDIT_ARG(pid, p2->p_pid);
> : p2->p_pcpu = pcpup;
> :
> :- PCPU_PROC_LOCK(p2->p_pcpu);
> :+ PCPU_PROC_WLOCK(p2->p_pcpu);
> : LIST_INSERT_HEAD(&p2->p_pcpu->pc_allproc, p2, p_list);
> :- PCPU_PROC_UNLOCK(p2->p_pcpu);
> :+ PCPU_PROC_WUNLOCK(p2->p_pcpu);
> :
> : mtx_lock(&pidhash_lock);
> : LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_ktrace.c#5 (text+ko) ====
> :
> :@@ -640,7 +640,7 @@
> : if (ops == KTROP_CLEARFILE) {
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : PROC_LOCK(p);
> : if (p->p_tracevp == vp) {
> :@@ -664,7 +664,7 @@
> : } else
> : PROC_UNLOCK(p);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : goto done;
> :@@ -983,7 +983,7 @@
> : cred = NULL;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : PROC_LOCK(p);
> : if (p->p_tracevp == vp) {
> :@@ -1001,7 +1001,7 @@
> : cred = NULL;
> : }
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> :
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_proc.c#5 (text+ko) ====
> :
> :@@ -110,9 +110,10 @@
> : void
> : procinit()
> : {
> :-
> :+
> : sx_init(&allpcpu_lock, "allpcpu");
> : sx_init(&proctree_lock, "proctree");
> :+ sx_init(&pcpu_find(0)->pc_allproc_lock, "cpu allproc");
> : mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
> : mtx_init(&pidalloc_lock, "pidalloc", NULL, MTX_DEF);
> : mtx_init(&pidhash_lock, "pidhash", NULL, MTX_DEF);
> :@@ -1002,7 +1003,7 @@
> : sx_slock(&allpcpu_lock);
> : for (doingzomb = 0 ; doingzomb < 2 ; doingzomb++) {
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : if (!doingzomb)
> : p = LIST_FIRST(&pc->pc_allproc);
> : else
> :@@ -1105,12 +1106,12 @@
> :
> : error = sysctl_out_proc(p, req, flags | doingzomb);
> : if (error) {
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : sx_sunlock(&allpcpu_lock);
> : return (error);
> : }
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : }
> : sx_sunlock(&allpcpu_lock);
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_resource.c#6 (text+ko) ====
> :
> :@@ -144,7 +144,7 @@
> : uap->who = td->td_ucred->cr_uid;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : PROC_LOCK(p);
> : if (!p_cansee(td, p) &&
> :@@ -154,7 +154,7 @@
> : }
> : PROC_UNLOCK(p);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : break;
> :@@ -236,9 +236,8 @@
> : uap->who = td->td_ucred->cr_uid;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> :-
> : PROC_LOCK(p);
> : if (p->p_ucred->cr_uid == uap->who &&
> : !p_cansee(td, p)) {
> :@@ -247,6 +246,7 @@
> : }
> : PROC_UNLOCK(p);
> : }
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : break;
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_sig.c#9 (text+ko) ====
> :
> :@@ -1648,7 +1648,7 @@
> : */
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : PROC_LOCK(p);
> : if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
> :@@ -1663,7 +1663,7 @@
> : }
> : PROC_UNLOCK(p);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : } else {
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/sched_4bsd.c#9 (text+ko) ====
> :
> :@@ -392,7 +392,7 @@
> : realstathz = stathz ? stathz : hz;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : /*
> : * Prevent state changes and protect run queue.
> :@@ -481,7 +481,7 @@
> : } /* end of thread loop */
> : mtx_unlock_spin(&sched_lock);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : } /* end of process loop */
> : sx_sunlock(&allpcpu_lock);
> : }
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/subr_pcpu.c#4 (text+ko) ====
> :
> :@@ -75,7 +75,8 @@
> : cpuid_to_pcpu[cpuid] = pcpu;
> : LIST_INIT(&pcpu->pc_allproc);
> : LIST_INIT(&pcpu->pc_zombproc);
> :- mtx_init(&pcpu->pc_allproc_lock, "cpu allproc", NULL, MTX_DEF);
> :+ if (cpuid != 0)
> :+ sx_init(&pcpu->pc_allproc_lock, "cpu allproc");
> : SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
> : cpu_pcpu_init(pcpu, cpuid, size);
> : }
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/subr_witness.c#7 (text+ko) ====
> :
> :@@ -275,13 +275,13 @@
> : */
> : { "proctree", &lock_class_sx },
> : { "allpcpu", &lock_class_sx },
> :+ { "cpu allproc", &lock_class_sx },
> : { NULL, NULL },
> : /*
> : * Various mutexes
> : */
> : { "Giant", &lock_class_mtx_sleep },
> : { "pidalloc", &lock_class_mtx_sleep },
> :- { "cpu allproc", &lock_class_mtx_sleep },
> : { "pidhash", &lock_class_mtx_sleep },
> : { "filedesc structure", &lock_class_mtx_sleep },
> : { "pipe mutex", &lock_class_mtx_sleep },
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/kern/sys_process.c#6 (text+ko) ====
> :
> :@@ -532,7 +532,7 @@
> : /* this is slow, should be optimized */
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : PROC_LOCK(p);
> : mtx_lock_spin(&sched_lock);
> :@@ -545,6 +545,7 @@
> : break; /* proc lock held */
> : PROC_UNLOCK(p);
> : }
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : if (p == NULL) {
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcpu.h#15 (text+ko) ====
> :
> :@@ -79,7 +79,7 @@
> : struct rwindow pc_tsbwbuf[2]; \
> : u_int pc_node; \
> : uint16_t pc_cpulist[MAXCPU]; \
> :- uint64_t pad[6];
> :+ uint64_t pad[7];
> :
> : /* XXX SUN4V_FIXME - as we access the *_ra and *_size fields in quick
> : * succession we _really_ want them to be L1 cache line size aligned
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/sys/pcpu.h#4 (text+ko) ====
> :
> :@@ -43,12 +43,13 @@
> :
> : #include <sys/queue.h>
> : #include <sys/vmmeter.h>
> :+#include <sys/sx.h>
> : #include <machine/pcpu.h>
> :
> : LIST_HEAD(proclist, proc);
> : struct pcb;
> : struct thread;
> :-
> :+struct sx;
> : /*
> : * This structure maps out the global data that needs to be kept on a
> : * per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
> :@@ -68,7 +69,7 @@
> : cpumask_t pc_other_cpus; /* Mask of all other cpus */
> : SLIST_ENTRY(pcpu) pc_allcpu;
> : struct lock_list_entry *pc_spinlocks;
> :- struct mtx pc_allproc_lock; /* lock for pcpu process list */
> :+ struct sx pc_allproc_lock; /* lock for pcpu process list */
> : struct proclist pc_zombproc;
> : struct proclist pc_allproc;
> : #ifdef KTR_PERCPU
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/sys/proc.h#7 (text+ko) ====
> :
> :@@ -623,8 +623,10 @@
> : #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type))
> :
> : /* lock pcpu process list */
> :-#define PCPU_PROC_LOCK(pc) mtx_lock(&(pc)->pc_allproc_lock)
> :-#define PCPU_PROC_UNLOCK(pc) mtx_unlock(&(pc)->pc_allproc_lock)
> :+#define PCPU_PROC_RLOCK(pc) sx_slock(&(pc)->pc_allproc_lock)
> :+#define PCPU_PROC_RUNLOCK(pc) sx_sunlock(&(pc)->pc_allproc_lock)
> :+#define PCPU_PROC_WLOCK(pc) sx_xlock(&(pc)->pc_allproc_lock)
> :+#define PCPU_PROC_WUNLOCK(pc) sx_xunlock(&(pc)->pc_allproc_lock)
> :
> :
> : /* Lock and unlock a process group. */
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/vm/vm_glue.c#7 (text+ko) ====
> :
> :@@ -678,7 +678,7 @@
> : ppri = INT_MIN;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN))
> : continue;
> :@@ -709,7 +709,7 @@
> : }
> : mtx_unlock_spin(&sched_lock);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> :
> :@@ -808,7 +808,7 @@
> : retry:
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> :
> : struct vmspace *vm;
> :@@ -930,7 +930,7 @@
> : PROC_UNLOCK(p);
> : vm_map_unlock(&vm->vm_map);
> : vmspace_free(vm);
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : sx_sunlock(&allpcpu_lock);
> : goto retry;
> : }
> :@@ -944,7 +944,7 @@
> : vmspace_free(vm);
> : continue;
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : /*
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/vm/vm_meter.c#5 (text+ko) ====
> :
> :@@ -143,7 +143,7 @@
> : */
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : if (p->p_flag & P_SYSTEM)
> : continue;
> :@@ -209,7 +209,7 @@
> : if (paging)
> : totalp->t_pw++;
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : /*
> :
> :==== //depot/projects/kmacy_sun4v/src/sys/vm/vm_pageout.c#4 (text+ko) ====
> :
> :@@ -1224,7 +1224,7 @@
> : bigsize = 0;
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : int breakout;
> :
> :@@ -1281,7 +1281,7 @@
> : } else
> : PROC_UNLOCK(p);
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : if (bigproc != NULL) {
> :@@ -1576,7 +1576,7 @@
> :
> : sx_slock(&allpcpu_lock);
> : SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> :- PCPU_PROC_LOCK(pc);
> :+ PCPU_PROC_RLOCK(pc);
> : LIST_FOREACH(p, &pc->pc_allproc, p_list) {
> : vm_pindex_t limit, size;
> :
> :@@ -1630,7 +1630,7 @@
> : &p->p_vmspace->vm_map, limit);
> : }
> : }
> :- PCPU_PROC_UNLOCK(pc);
> :+ PCPU_PROC_RUNLOCK(pc);
> : }
> : sx_sunlock(&allpcpu_lock);
> : }
> :
> :
>
> --
> arr@watson.org
>
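
A minimal sketch of the scan pattern this change converges on, for anyone
who wants the shape without reading every hunk. The names allpcpu_lock,
cpuhead, pc_allcpu, pc_allproc, p_list, and the PCPU_PROC_* macros are the
ones touched in the diff above; the helper name and callback are
hypothetical, so this is an illustration rather than code from the change:

/*
 * Read-side scan of the per-CPU process lists (assumes the usual kernel
 * headers: sys/param.h, sys/proc.h, sys/pcpu.h, sys/sx.h).  The sx-backed
 * PCPU_PROC_RLOCK/RUNLOCK let scans of different CPUs' lists run in
 * parallel; fork and exit take PCPU_PROC_WLOCK on one CPU's list only,
 * so they no longer contend with every scan in the system.
 */
static void
scan_all_procs(void (*visit)(struct proc *))
{
	struct pcpu *pc;
	struct proc *p;

	sx_slock(&allpcpu_lock);	/* keep the set of CPUs stable */
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		PCPU_PROC_RLOCK(pc);	/* sx_slock(&pc->pc_allproc_lock) */
		LIST_FOREACH(p, &pc->pc_allproc, p_list)
			visit(p);
		PCPU_PROC_RUNLOCK(pc);
	}
	sx_sunlock(&allpcpu_lock);
}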