Date:        Fri, 8 Oct 1999 19:57:26 -0500
From:        Alan Cox <alc@cs.rice.edu>
To:          Alfred Perlstein <bright@wintelcom.net>
Cc:          smp@freebsd.org
Subject:     Re: optimizing the idle-loop and more mp stuff
Message-ID:  <19991008195726.C36160@nonpc.cs.rice.edu>
In-Reply-To: <Pine.BSF.4.05.9910081715240.8080-100000@fw.wintelcom.net>; from Alfred Perlstein on Fri, Oct 08, 1999 at 05:53:07PM -0700
References:  <Pine.BSF.4.05.9910081715240.8080-100000@fw.wintelcom.net>
--3MwIy2ne0vdjdPXF
Content-Type: text/plain; charset=us-ascii

On Fri, Oct 08, 1999 at 05:53:07PM -0700, Alfred Perlstein wrote:
>
> I've been looking at the mplock stuff for a while now and I was
> wondering if someone could clarify some observations and assumptions
> I have.
>
> 1) the idle loop that zeros pages takes the mplock, so it can't
> even run on an idle CPU.
>
> It seems to me that this could be fixed by ripping some code from
> pmap_zero_page(), specifically to map the page in and _then_ release
> the mplock, zero the page, and acquire the mplock again.
>
> Perhaps even grabbing several pages, mapping them in, and then zeroing
> them all in one sweep to lower the overhead of mplock manipulation.
>

Try updating/using the attached patch.

Alan

--3MwIy2ne0vdjdPXF
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename="idle-lock.patch"

Index: i386/i386/vm_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/vm_machdep.c,v
retrieving revision 1.125
diff -c -r1.125 vm_machdep.c
*** vm_machdep.c        1999/07/31 18:30:49     1.125
--- vm_machdep.c        1999/08/19 17:05:56
***************
*** 546,570 ****
        return(0);

  #ifdef SMP
!       if (try_mplock()) {
  #endif
-               s = splvm();
-               __asm __volatile("sti" : : : "memory");
                zero_state = 0;
                m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
                if (m != NULL && (m->flags & PG_ZERO) == 0) {
                        vm_page_queues[m->queue].lcnt--;
                        TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
                        m->queue = PQ_NONE;
!                       splx(s);
! #if 0
!                       rel_mplock();
! #endif
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
! #if 0
!                       get_mplock();
! #endif
!                       (void)splvm();
                        vm_page_flag_set(m, PG_ZERO);
                        m->queue = PQ_FREE + m->pc;
                        vm_page_queues[m->queue].lcnt++;
--- 546,570 ----
        return(0);

  #ifdef SMP
!       if (simple_lock_try(&vm_page_queue_free_mutex)) {
  #endif
                zero_state = 0;
                m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
                if (m != NULL && (m->flags & PG_ZERO) == 0) {
                        vm_page_queues[m->queue].lcnt--;
                        TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
                        m->queue = PQ_NONE;
!
!                       simple_unlock(&vm_page_queue_free_mutex);
!
!                       __asm __volatile("sti" : : : "memory");
!
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
!
!                       __asm __volatile("cli" : : : "memory");
!
!                       simple_lock(&vm_page_queue_free_mutex);
!
                        vm_page_flag_set(m, PG_ZERO);
                        m->queue = PQ_FREE + m->pc;
                        vm_page_queues[m->queue].lcnt++;
***************
*** 576,585 ****
                        zero_state = 1;
                }
                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-               splx(s);
-               __asm __volatile("cli" : : : "memory");
  #ifdef SMP
!               rel_mplock();
  #endif
                return (1);
  #ifdef SMP
--- 576,583 ----
                        zero_state = 1;
                }
                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
  #ifdef SMP
!               simple_unlock(&vm_page_queue_free_mutex);
  #endif
                return (1);
  #ifdef SMP
Index: i386/include/smptests.h
===================================================================
RCS file: /home/ncvs/src/sys/i386/include/smptests.h,v
retrieving revision 1.32
diff -c -r1.32 smptests.h
*** smptests.h  1998/04/06 11:38:18     1.32
--- smptests.h  1999/08/19 08:14:57
***************
*** 129,135 ****
   * panic()s, as simple locks are binary, and this would cause a deadlock.
   *
   */
! #define SL_DEBUG


  /*
--- 129,135 ----
   * panic()s, as simple locks are binary, and this would cause a deadlock.
   *
   */
! #define sL_DEBUG


  /*
Index: vm/vm_page.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_page.c,v
retrieving revision 1.140
diff -c -r1.140 vm_page.c
*** vm_page.c   1999/08/17 18:09:01     1.140
--- vm_page.c   1999/08/19 07:26:49
***************
*** 88,93 ****
--- 88,94 ----

  static void vm_page_queue_init __P((void));
  static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));
+ static void _vm_page_free_toq(vm_page_t m);

  /*
   * Associated with page of user-allocatable memory is a
***************
*** 106,111 ****
--- 107,114 ----

  struct vpgqueues vm_page_queues[PQ_COUNT] = {{0}};

+ struct simplelock vm_page_queue_free_mutex;
+
  static void
  vm_page_queue_init(void) {
        int i;
***************
*** 130,135 ****
--- 133,139 ----
                        panic("vm_page_queue_init: queue %d is null", i);
                }
        }
+       simple_lock_init(&vm_page_queue_free_mutex);
  }

  vm_page_t vm_page_array = 0;
***************
*** 764,772 ****
        if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
                page_req = VM_ALLOC_SYSTEM;
        };
-
-       s = splvm();

  loop:
        if (cnt.v_free_count > cnt.v_free_reserved) {
                /*
--- 768,775 ----
        if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
                page_req = VM_ALLOC_SYSTEM;
        };
+       s = vm_page_queue_free_lock();

  loop:
        if (cnt.v_free_count > cnt.v_free_reserved) {
                /*
***************
*** 795,801 ****
                 */
                m = vm_page_select_cache(object, pindex);
                if (m == NULL) {
!                       splx(s);
  #if defined(DIAGNOSTIC)
                        if (cnt.v_cache_count > 0)
                                printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
--- 798,804 ----
                 */
                m = vm_page_select_cache(object, pindex);
                if (m == NULL) {
!                       vm_page_queue_free_unlock(s);
  #if defined(DIAGNOSTIC)
                        if (cnt.v_cache_count > 0)
                                printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
***************
*** 807,819 ****
                        KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
                        vm_page_busy(m);
                        vm_page_protect(m, VM_PROT_NONE);
!                       vm_page_free(m);
                        goto loop;
                } else {
                        /*
                         * Not allocateable from cache from interrupt, give up.
                         */
!                       splx(s);
                        vm_pageout_deficit++;
                        pagedaemon_wakeup();
                        return (NULL);
--- 810,823 ----
                        KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
                        vm_page_busy(m);
                        vm_page_protect(m, VM_PROT_NONE);
!                       vm_page_flag_clear(m, PG_ZERO);
!                       _vm_page_free_toq(m);
                        goto loop;
                } else {
                        /*
                         * Not allocateable from cache from interrupt, give up.
                         */
!                       vm_page_queue_free_unlock(s);
                        vm_pageout_deficit++;
                        pagedaemon_wakeup();
                        return (NULL);
***************
*** 876,882 ****
            (cnt.v_free_count < cnt.v_pageout_free_min))
                pagedaemon_wakeup();

!       splx(s);

        return (m);
  }
--- 880,886 ----
            (cnt.v_free_count < cnt.v_pageout_free_min))
                pagedaemon_wakeup();

!       vm_page_queue_free_unlock(s);

        return (m);
  }
***************
*** 1066,1081 ****
   *    Object and page must be locked prior to entry.
   *    This routine may not block.
   */
-
  void
  vm_page_free_toq(vm_page_t m)
  {
        int s;
        struct vpgqueues *pq;
        vm_object_t object = m->object;

-       s = splvm();
-
        cnt.v_tfree++;

  #if !defined(MAX_PERF)
--- 1070,1091 ----
   *    Object and page must be locked prior to entry.
   *    This routine may not block.
   */
  void
  vm_page_free_toq(vm_page_t m)
  {
        int s;
+
+       s = vm_page_queue_free_lock();
+       _vm_page_free_toq(m);
+       vm_page_queue_free_unlock(s);
+ }
+
+ static void
+ _vm_page_free_toq(vm_page_t m)
+ {
        struct vpgqueues *pq;
        vm_object_t object = m->object;

        cnt.v_tfree++;

  #if !defined(MAX_PERF)
***************
*** 1108,1114 ****
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
-               splx(s);
                return;
        }

--- 1118,1123 ----
***************
*** 1168,1175 ****
        }

        vm_page_free_wakeup();
-
-       splx(s);
  }

  /*
--- 1177,1182 ----
Index: vm/vm_page.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_page.h,v
retrieving revision 1.70
diff -c -r1.70 vm_page.h
*** vm_page.h   1999/08/17 22:48:10     1.70
--- vm_page.h   1999/08/19 16:59:44
***************
*** 280,285 ****
--- 280,310 ----

  #endif

+ #ifdef SMP
+ extern struct simplelock vm_page_queue_free_mutex;
+ #endif
+
+ static __inline int
+ vm_page_queue_free_lock(void)
+ {
+       int s;
+
+       s = splvm();
+ #ifdef SMP
+       simple_lock(&vm_page_queue_free_mutex);
+ #endif
+       return s;
+ }
+
+ static __inline void
+ vm_page_queue_free_unlock(int s)
+ {
+ #ifdef SMP
+       simple_unlock(&vm_page_queue_free_mutex);
+ #endif
+       splx(s);
+ }
+
  extern int vm_page_zero_count;

  extern vm_page_t vm_page_array;               /* First resident page in table */

--3MwIy2ne0vdjdPXF--
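The patch replaces the mplock around the idle-loop page zeroing with a dedicated
simple lock on the free page queue: the lock is taken with simple_lock_try()
only long enough to dequeue a page (and later to requeue it with PG_ZERO set),
while pmap_zero_page() runs with the lock dropped and interrupts enabled. As a
rough illustration of that discipline, and not the kernel code itself, the
following user-space C sketch models it with a pthread mutex standing in for
vm_page_queue_free_mutex and memset() standing in for pmap_zero_page(); every
identifier below (page_t, free_queue, free_queue_mutex, PG_ZERO_MODEL,
page_zero_idle_model) is invented for the example.

/*
 * User-space model of the free-queue locking pattern in the patch above.
 * Hold the lock only for queue manipulation; zero the page with it dropped.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#define MODEL_PAGE_SIZE 4096
#define PG_ZERO_MODEL   0x01

typedef struct page {
        TAILQ_ENTRY(page) pageq;
        int     flags;                  /* PG_ZERO_MODEL once zeroed */
        char    data[MODEL_PAGE_SIZE];
} page_t;

static TAILQ_HEAD(, page) free_queue = TAILQ_HEAD_INITIALIZER(free_queue);
static pthread_mutex_t free_queue_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * One pass of the idle-time zeroing loop.  Returns 1 if a page was zeroed,
 * 0 if the lock was busy or no unzeroed page was found.
 */
static int
page_zero_idle_model(void)
{
        page_t *m;

        /* Like simple_lock_try(): an idle CPU should not spin here. */
        if (pthread_mutex_trylock(&free_queue_mutex) != 0)
                return (0);

        TAILQ_FOREACH(m, &free_queue, pageq)
                if ((m->flags & PG_ZERO_MODEL) == 0)
                        break;
        if (m == NULL) {
                pthread_mutex_unlock(&free_queue_mutex);
                return (0);
        }
        TAILQ_REMOVE(&free_queue, m, pageq);

        /*
         * Drop the lock for the expensive part, as the patch does around
         * pmap_zero_page(), so other threads can use the free queue.
         */
        pthread_mutex_unlock(&free_queue_mutex);
        memset(m->data, 0, sizeof(m->data));

        /* Retake the lock only to mark the page and put it back. */
        pthread_mutex_lock(&free_queue_mutex);
        m->flags |= PG_ZERO_MODEL;
        TAILQ_INSERT_TAIL(&free_queue, m, pageq);
        pthread_mutex_unlock(&free_queue_mutex);
        return (1);
}

int
main(void)
{
        for (int i = 0; i < 8; i++) {
                page_t *m = malloc(sizeof(*m));
                if (m == NULL)
                        return (1);
                m->flags = 0;
                TAILQ_INSERT_TAIL(&free_queue, m, pageq);
        }
        while (page_zero_idle_model())
                ;
        return (0);
}

This should build with something like "cc -o zeroidle zeroidle.c -lpthread"
(the file name is arbitrary). The point of the split, as in the patch, is that
the lock hold time is bounded by a few queue operations rather than by zeroing
4096 bytes, so an otherwise idle CPU can pre-zero pages without stalling
vm_page_alloc() or vm_page_free_toq() callers on other CPUs, which now take the
same free-queue lock.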