From owner-p4-projects Tue Jul 9 17:38:38 2002
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id 0557F37B401; Tue, 9 Jul 2002 17:37:07 -0700 (PDT)
Delivered-To: perforce@freebsd.org
Received: from mx1.FreeBSD.org (mx1.FreeBSD.org [216.136.204.125])
	by hub.freebsd.org (Postfix) with ESMTP id AC8E137B400
	for <perforce@freebsd.org>; Tue, 9 Jul 2002 17:37:06 -0700 (PDT)
Received: from freefall.freebsd.org (freefall.FreeBSD.org [216.136.204.21])
	by mx1.FreeBSD.org (Postfix) with ESMTP id 226E443E09
	for <perforce@freebsd.org>; Tue, 9 Jul 2002 17:37:06 -0700 (PDT)
	(envelope-from peter@freebsd.org)
Received: from freefall.freebsd.org (perforce@localhost [127.0.0.1])
	by freefall.freebsd.org (8.12.4/8.12.4) with ESMTP id g6A0b6JU023171
	for <perforce@freebsd.org>; Tue, 9 Jul 2002 17:37:06 -0700 (PDT)
	(envelope-from peter@freebsd.org)
Received: (from perforce@localhost)
	by freefall.freebsd.org (8.12.4/8.12.4/Submit) id g6A0b594023168
	for perforce@freebsd.org; Tue, 9 Jul 2002 17:37:05 -0700 (PDT)
Date: Tue, 9 Jul 2002 17:37:05 -0700 (PDT)
Message-Id: <200207100037.g6A0b594023168@freefall.freebsd.org>
X-Authentication-Warning: freefall.freebsd.org: perforce set sender to peter@freebsd.org using -f
From: Peter Wemm <peter@freebsd.org>
Subject: PERFORCE change 14001 for review
To: Perforce Change Reviews <perforce@freebsd.org>
Sender: owner-p4-projects@FreeBSD.ORG
Precedence: bulk
List-ID: <p4-projects.FreeBSD.ORG>
List-Archive: (Web Archive)
List-Help: (List Instructions)
List-Subscribe:
List-Unsubscribe:
X-Loop: FreeBSD.ORG

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=14001

Change 14001 by peter@peter_overcee on 2002/07/09 17:36:34

	sync up with recent changes from my checked out cvs tree.
	Of note:
		fix some bugs in pmap_qenter/qremove (doh!)
		undo hackery for critical_enter() blocking interrupts
		make PG_G sort-of work again.
		fix bug in pmap_map() (doh!)
		catch up with kse3

Affected files ...

.. //depot/projects/pmap/sys/i386/i386/bios.c#8 edit
.. //depot/projects/pmap/sys/i386/i386/locore.s#8 edit
.. //depot/projects/pmap/sys/i386/i386/mp_machdep.c#18 edit
.. //depot/projects/pmap/sys/i386/i386/pmap.c#17 edit
.. //depot/projects/pmap/sys/i386/i386/vm86.c#4 edit
.. //depot/projects/pmap/sys/i386/include/cpufunc.h#11 edit
.. //depot/projects/pmap/sys/i386/include/pmap.h#10 edit
.. //depot/projects/pmap/sys/kern/subr_witness.c#11 edit
.. //depot/projects/pmap/sys/vm/vm_glue.c#7 edit

Differences ...

==== //depot/projects/pmap/sys/i386/i386/bios.c#8 (text+ko) ====

@@ -384,31 +384,24 @@
 	args->seg.code32.limit = 0xffff;
 
 	ptd = (pd_entry_t *)rcr3();
-printf("ptd %p\n", ptd);
 	if (ptd == (u_int *)IdlePTD) {
-printf("matches IdlePTD, which is %p\n", IdlePTD);
 		/*
 		 * no page table, so create one and install it.
 		 */
 		pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
 		ptd = (pd_entry_t *)((u_int)ptd + KERNBASE);
 		*ptd = vtophys(pte) | PG_RW | PG_V;
-printf("KERNBASE ptd = %p, pte = %p\n", ptd, pte);
-		*ptd = vtophys(pte) | PG_RW | PG_V;
-printf("set to 0x%x\n", *ptd);
 	} else {
 		/*
 		 * this is a user-level page table
 		 */
 		pte = PTmap;
 	}
-printf("ok, pte = %p, vm86pa = 0x%x\n", pte, vm86pa);
 	/*
 	 * install pointer to page 0.  we don't need to flush the tlb,
 	 * since there should not be a previous mapping for page 0.
 	 */
 	*pte = (vm86pa - PAGE_SIZE) | PG_RW | PG_V;
-printf("*pte is now 0x%x\n", *pte);
 
 	stack_top = stack;
 	va_start(ap, fmt);
@@ -457,15 +450,11 @@
 	bioscall_vector.vec16.offset = (u_short)args->entry;
 	bioscall_vector.vec16.segment = GSEL(GBIOSCODE16_SEL, SEL_KPL);
 
-printf("doing bios16call\n");
 	i = bios16_call(&args->r, stack_top);
-printf("done\n");
 
 	if (pte == PTmap) {
-printf("pte = %p, PTmap = %p, zeroing\n", pte, PTmap);
 		*pte = 0;		/* remove entry */
 	} else {
-printf("ptd = %p, zeroing\n", ptd);
 		*ptd = 0;		/* remove page table */
 		free(pte, M_TEMP);	/* ... and free it */
 	}
@@ -474,7 +463,6 @@
 	 * XXX only needs to be invlpg(0) but that doesn't work on the 386
 	 */
 	pmap_invalidate_all(kernel_pmap);
-printf("done\n");
 
 	return (i);
 }
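
For anyone following along: the page-0 install above is plain PTE arithmetic.
A throwaway userland sketch of the same composition, with the i386 constants
written out (the names below are local stand-ins, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define PG_V		0x001u		/* page valid */
#define PG_RW		0x002u		/* writeable */
#define PG_FRAME	0xfffff000u	/* 4K page frame mask */
#define PAGE_SIZE	0x1000u

/* compose a PTE the way the hunk above does: frame | PG_RW | PG_V */
static uint32_t
make_pte(uint32_t pa)
{
	return ((pa & PG_FRAME) | PG_RW | PG_V);
}

int
main(void)
{
	uint32_t vm86pa = 0x3000;	/* pretend physical base */
	uint32_t pte = make_pte(vm86pa - PAGE_SIZE);

	printf("pte = 0x%08x (frame 0x%08x, flags 0x%03x)\n",
	    (unsigned)pte, (unsigned)(pte & PG_FRAME),
	    (unsigned)(pte & ~PG_FRAME));
	return (0);
}

Teardown is the mirror image: store zero into the entry and flush, which is
what the later hunk's *pte = 0 followed by pmap_invalidate_all() does.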
==== //depot/projects/pmap/sys/i386/i386/locore.s#8 (text+ko) ====

@@ -127,6 +127,7 @@
 	.globl	bootinfo
 bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */
 
+	.globl	KERNend
 KERNend:	.long	0	/* phys addr end of kernel (just after bss) */
 physfree:	.long	0	/* phys addr of next free page */
 
==== //depot/projects/pmap/sys/i386/i386/mp_machdep.c#18 (text+ko) ====

@@ -345,7 +345,7 @@
 	mtx_init(&com_mtx, "com", NULL, MTX_SPIN);
 #endif /* USE_COMLOCK */
 #ifdef APIC_IO
-	mtx_init(&smp_tlb_mtx, "tlb", MTX_SPIN);
+	mtx_init(&smp_tlb_mtx, "tlb", NULL, MTX_SPIN);
 #endif
 }
 
@@ -2261,13 +2261,43 @@
 	mtx_lock_spin(&smp_tlb_mtx);
 	smp_tlb_addr1 = addr1;
 	smp_tlb_addr2 = addr2;
-	smp_tlb_wait = 0;
+	atomic_store_int_rel(&smp_tlb_wait, 0);
 	ipi_all_but_self(vector);
-	while (atomic_load_acq_int(&smp_tlb_wait) < ncpu)
-		/* XXX cpu_pause() */ ;
+	while (smp_tlb_wait < ncpu)
+		ia32_pause();
 	mtx_unlock_spin(&smp_tlb_mtx);
 }
 
+/*
+ * This is about as magic as it gets.  fortune(1) has got similar code
+ * for reversing bits in a word.  Who thinks up this stuff??
+ *
+ * Yes, it does appear to be consistently faster than:
+ *	while (i = ffs(m)) {
+ *		m >>= i;
+ *		bits++;
+ *	}
+ * and
+ *	while (lsb = (m & -m)) {	// This is magic too
+ *		m &= ~lsb;		// or: m ^= lsb
+ *		bits++;
+ *	}
+ * Both of these latter forms do some very strange things on gcc-3.1 with
+ * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
+ * There is probably an SSE or MMX popcnt instruction.
+ */
+static inline u_int32_t
+popcnt(u_int32_t m)
+{
+
+	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
+	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
+	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
+	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
+	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
+	return m;
+}
+
 static void
 smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 {
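
The counting trick is easy to sanity-check in isolation.  A quick userland
check of the same reduction against a naive loop (the Kernighan m &= m - 1
form, which strips one set bit per iteration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same pairwise-sum reduction as the popcnt() added above */
static uint32_t
popcnt32(uint32_t m)
{
	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return (m);
}

/* reference: clear the lowest set bit until none remain */
static uint32_t
popcnt_slow(uint32_t m)
{
	uint32_t bits = 0;

	while (m != 0) {
		m &= m - 1;	/* strips the least significant 1 bit */
		bits++;
	}
	return (bits);
}

int
main(void)
{
	uint32_t v[] = { 0, 1, 0xff, 0x80000000u, 0xffffffffu, 0xa5a5a5a5u };
	size_t i;

	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		assert(popcnt32(v[i]) == popcnt_slow(v[i]));
	printf("popcnt checks out\n");
	return (0);
}

Each round sums adjacent 1-, 2-, 4-, 8- and then 16-bit fields in place, so
the whole count costs five mask/add/shift steps no matter how many bits are
set.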
@@ -2285,13 +2315,7 @@
 	mask &= ~(1 << PCPU_GET(cpuid));
 	if (mask == 0)
 		return;
-	/* Count the target cpus */
-	ncpu = 0;
-	m = mask;
-	while ((i = ffs(m)) != 0) {
-		m >>= i;
-		ncpu++;
-	}
+	ncpu = popcnt(mask);
 	if (ncpu > othercpus) {
 		/* XXX this should be a panic offence */
 		printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
@@ -2308,13 +2332,13 @@
 	mtx_lock_spin(&smp_tlb_mtx);
 	smp_tlb_addr1 = addr1;
 	smp_tlb_addr2 = addr2;
-	smp_tlb_wait = 0;
+	atomic_store_int_rel(&smp_tlb_wait, 0);
 	if (mask == (u_int)-1)
 		ipi_all_but_self(vector);
 	else
 		ipi_selected(mask, vector);
-	while (atomic_load_acq_int(&smp_tlb_wait) < ncpu)
-		/* XXX cpu_pause() */ ;
+	while (smp_tlb_wait < ncpu)
+		ia32_pause();
 	mtx_unlock_spin(&smp_tlb_mtx);
 }
 #endif
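
ia32_pause() is new here; a sketch of the conventional definition and the
shape of the wait loop it serves.  The inline below is the usual one-liner
for the "pause" spin-wait hint (a plain nop on pre-Pentium 4 parts); the loop
is a stand-alone model with a fake counter, not the kernel's code, and it
only builds on x86:

#include <stdio.h>

static inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

/* stand-in for the shootdown acknowledgement counter */
static volatile unsigned smp_tlb_wait;

/* shape of the wait loop above: spin politely until all cpus ack */
static void
wait_for_acks(unsigned ncpu)
{
	while (smp_tlb_wait < ncpu)
		ia32_pause();	/* be kind to the other hyperthread */
}

int
main(void)
{
	smp_tlb_wait = 1;	/* pretend the other cpu already acked */
	wait_for_acks(1);
	printf("all cpus acked\n");
	return (0);
}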
==== //depot/projects/pmap/sys/i386/i386/pmap.c#17 (text+ko) ====

@@ -100,6 +100,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -164,6 +165,7 @@
 static int nkpt;
 vm_offset_t kernel_vm_end;
+extern u_int32_t KERNend;
 
 /*
  * Data for the pv entry allocation mechanism
@@ -364,7 +366,7 @@
 		PTD[i] = 0;
 
 	pgeflag = 0;
-#if /* !defined(SMP) || */ defined(ENABLE_PG_G)
+#if !defined(SMP) || defined(ENABLE_PG_G)
 	if (cpu_feature & CPUID_PGE)
 		pgeflag = PG_G;
 #endif
@@ -403,6 +405,7 @@
 #ifdef SMP
 	if (cpu_apic_address == 0)
 		panic("pmap_bootstrap: no local apic! (non-SMP hardware?)");
+
 	/* local apic is mapped on last page */
 	SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
 	    (cpu_apic_address & PG_FRAME));
@@ -418,27 +421,36 @@
 pmap_set_opt(void)
 {
 	pt_entry_t *pte;
-	vm_offset_t va;
+	vm_offset_t va, endva;
 
-	if (pgeflag && (cpu_feature & CPUID_PGE))
+	if (pgeflag && (cpu_feature & CPUID_PGE)) {
 		load_cr4(rcr4() | CR4_PGE);
+		invltlb();		/* Insurance */
+	}
 #ifndef DISABLE_PSE
-	if (pseflag && (cpu_feature & CPUID_PSE))
+	if (pseflag && (cpu_feature & CPUID_PSE)) {
 		load_cr4(rcr4() | CR4_PSE);
+		invltlb();		/* Insurance */
+	}
 #endif
 	if (PCPU_GET(cpuid) == 0) {
 #ifndef DISABLE_PSE
-		if (pdir4mb)
+		if (pdir4mb) {
 			kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
+			invltlb();	/* Insurance */
+		}
 #endif
 		if (pgeflag) {
-			/* XXX see earlier comments about virtual_avail */
-			for (va = KERNBASE; va < virtual_avail; va += PAGE_SIZE)
-			{
+			/* Turn on PG_G for text, data, bss pages. */
+			va = (vm_offset_t)btext;
+			endva = KERNBASE + KERNend;
+			while (va < endva) {
 				pte = vtopte(va);
 				if (*pte)
 					*pte |= pgeflag;
+				va += PAGE_SIZE;
 			}
+			invltlb();	/* Insurance */
 		}
 		/*
 		 * We do not need to broadcast the invltlb here, because
@@ -446,7 +458,6 @@
 		 * lock.  See ap_init().
 		 */
 	}
-	invltlb();		/* local */
 }
 
 void *
@@ -576,8 +587,6 @@
 	 * interrupts disabled here.
 	 * XXX we may need to hold schedlock to get a coherent pm_active
 	 */
-	if (td->td_critnest == 1)
-		cpu_critical_exit(td->td_savecrit);
 	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
 		invlpg(va);
 		smp_invlpg(va);
@@ -622,8 +631,6 @@
 	 * interrupts disabled here.
 	 * XXX we may need to hold schedlock to get a coherent pm_active
 	 */
-	if (td->td_critnest == 1)
-		cpu_critical_exit(td->td_savecrit);
 	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
 			invlpg(addr);
@@ -672,8 +679,6 @@
 	 * interrupts disabled here.
 	 * XXX we may need to hold schedlock to get a coherent pm_active
 	 */
-	if (td->td_critnest == 1)
-		cpu_critical_exit(td->td_savecrit);
 	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
 		invltlb();
 		smp_invltlb();
@@ -839,7 +844,7 @@
 		va += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, end);
+	pmap_invalidate_range(kernel_pmap, sva, va);
 	*virt = va;
 	return (sva);
 }
@@ -856,17 +861,15 @@
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
-	vm_offset_t va, end_va;
+	vm_offset_t va;
 
 	va = sva;
-	end_va = va + count * PAGE_SIZE;
-
-	while (va < end_va) {
+	while (count-- > 0) {
 		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, end_va);
+	pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
 /*
@@ -876,16 +879,14 @@
 void
 pmap_qremove(vm_offset_t sva, int count)
 {
-	vm_offset_t va, end_va;
+	vm_offset_t va;
 
 	va = sva;
-	end_va = va + count * PAGE_SIZE;
-
-	while (va < end_va) {
+	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, end_va);
+	pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
 static vm_page_t
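
The qenter/qremove rework batches the PTE stores and pays for a single ranged
invalidation at the end, with va doubling as the end-of-range cursor.  The
same shape in a stand-alone sketch (kenter and invalidate_range are printf
stand-ins for pmap_kenter and pmap_invalidate_range):

#include <stdio.h>

#define PAGE_SIZE 4096u

static void
kenter(unsigned va, unsigned pa)
{
	printf("map va 0x%08x -> pa 0x%08x\n", va, pa);
}

static void
invalidate_range(unsigned sva, unsigned eva)
{
	printf("invlpg range [0x%08x, 0x%08x)\n", sva, eva);
}

/* shape of the fixed pmap_qenter(): walk va forward, flush [sva, va) */
static void
qenter(unsigned sva, const unsigned *pa, int count)
{
	unsigned va = sva;

	while (count-- > 0) {
		kenter(va, *pa++);
		va += PAGE_SIZE;
	}
	/* one ranged flush covers exactly the pages just written */
	invalidate_range(sva, va);
}

int
main(void)
{
	unsigned pages[] = { 0x10000, 0x20000, 0x30000 };

	qenter(0xc0400000u, pages, 3);
	return (0);
}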
@@ -920,37 +921,20 @@
 	td->td_kstack_obj = ksobj;
 
 	/* get a kernel virtual address for the kstack for this thread */
+#ifdef KSTACK_GUARD
 	ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
 	if (ks == 0)
 		panic("pmap_new_thread: kstack allocation failed");
-
-	/*
-	 * Set the first page to be the unmapped guard page.
-	 */
-	ptek = vtopte(ks);
-	oldpte = *ptek;
-	*ptek = 0;
-	if (oldpte) {
-#ifdef I386_CPU
-		updateneeded = 1;
-#else
-		invlpg(ks);
-#endif
-	}
-
-	/*
-	 * move to the next page, which is where the real stack starts.
-	 */
+	if (*vtopte(ks) != 0)
+		pmap_qremove(ks, 1);
 	ks += PAGE_SIZE;
 	td->td_kstack = ks;
-	ptek++;
 #else
 	/* get a kernel virtual address for the kstack for this thread */
 	ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
 	if (ks == 0)
 		panic("pmap_new_thread: kstack allocation failed");
 	td->td_kstack = ks;
-	ptek = vtopte(ks);
 #endif
 	/*
 	 * For the length of the stack, link in a real page of ram for each
@@ -1163,7 +1147,6 @@
 		    (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
 	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t)IdlePTD);
 	invlpg((vm_offset_t)pmap->pm_pdir);
-	pmap->pm_count = 1;
 	pmap->pm_ptphint = NULL;
 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvlist);
@@ -2265,8 +2248,11 @@
 void *
 pmap_kenter_temporary(vm_offset_t pa, int i)
 {
-	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
-	invlpg((vm_offset_t)crashdumpmap + (i * PAGE_SIZE));
+	vm_offset_t va;
+
+	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
+	pmap_kenter(va, pa);
+	invlpg(va);
 	return ((void *)crashdumpmap);
 }
 
@@ -2644,6 +2630,12 @@
 				 * block.
 				 */
 				dstmpte = pmap_allocpte(dst_pmap, addr);
+				if ((APTDpde & PG_FRAME) !=
+				    (saved_pde & PG_FRAME)) {
+					APTDpde = saved_pde;
+printf ("IT HAPPENNED!");
+					invltlb();
+				}
 				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
 					/*
 					 * Clear the modified and
@@ -2675,13 +2667,16 @@
 void
 pmap_zero_page(vm_page_t m)
 {
-	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+	vm_offset_t phys;
 
-#ifdef SMP
-	/* XXX overkill, we only want to disable migration here */
-	/* XXX or maybe not. down the track we have reentrancy issues */
-	critical_enter();
-#endif
+	/*
+	 * Note that we do not use PG_G here, in case we get preempted
+	 * and end up on another CPU.  In doing so, we will have had an
+	 * implied invltlb() by the cpu_switch() routine.  This only works
+	 * if we do not use PG_G here.  With this concession, we do not
+	 * need to do any IPI shootdowns from here.
+	 */
+	phys = VM_PAGE_TO_PHYS(m);
 	if (*CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
 	*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
@@ -2694,9 +2689,6 @@
 	bzero(CADDR2, PAGE_SIZE);
 	*CMAP2 = 0;
 	invlpg((vm_offset_t)CADDR2);	/* SMP: local cpu only */
-#ifdef SMP
-	critical_exit();
-#endif
 }
 
@@ -2708,16 +2700,18 @@
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+	vm_offset_t phys;
 
-#ifdef SMP
-	/* XXX overkill, we only want to disable migration here */
-	/* XXX or maybe not. down the track we have reentrancy issues */
-	critical_enter();
-#endif
+	/*
+	 * Note that we do not use PG_G here, in case we get preempted
+	 * and end up on another CPU.  In doing so, we will have had an
+	 * implied invltlb() by the cpu_switch() routine.  This only works
+	 * if we do not use PG_G here.  With this concession, we do not
+	 * need to do any IPI shootdowns from here.
+	 */
+	phys = VM_PAGE_TO_PHYS(m);
 	if (*CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
-
 	*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
 	invlpg((vm_offset_t)CADDR2);	/* SMP: local cpu only */
 #if defined(I686_CPU)
@@ -2728,9 +2722,6 @@
 	bzero((char *)CADDR2 + off, size);
 	*CMAP2 = 0;
 	invlpg((vm_offset_t)CADDR2);	/* SMP: local cpu only */
-#ifdef SMP
-	critical_exit();
-#endif
 }
 
@@ -2742,20 +2733,20 @@
 void
 pmap_zero_page_idle(vm_page_t m)
 {
-	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+	vm_offset_t phys;
 
+	/*
+	 * Note that we do not use PG_G here, in case we get preempted
+	 * and end up on another CPU.  In doing so, we will have had an
+	 * implied invltlb() by the cpu_switch() routine.  This only works
+	 * if we do not use PG_G here.  With this concession, we do not
+	 * need to do any IPI shootdowns from here.
+	 */
+	phys = VM_PAGE_TO_PHYS(m);
 	if (*CMAP3)
 		panic("pmap_zero_page: CMAP3 busy");
-
 	*CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
-#ifdef SMP
-	mtx_lock(&Giant);	/* IPI sender not MPSAFE */
-#endif
-	invltlb_1pg((vm_offset_t)CADDR3);
-#ifdef SMP
-	mtx_unlock(&Giant);
-#endif
-
+	invlpg((vm_offset_t)CADDR3);	/* SMP: local cpu only */
 #if defined(I686_CPU)
 	if (cpu_class == CPUCLASS_686)
 		i686_pagezero(CADDR3);
@@ -2763,6 +2754,7 @@
 #endif
 	bzero(CADDR3, PAGE_SIZE);
 	*CMAP3 = 0;
+	invlpg((vm_offset_t)CADDR3);	/* SMP: local cpu only */
 }
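
A note on why the PG_G comments above matter: PG_G mappings survive the
implicit TLB flush of a CR3 reload, so any path that must drop them has to
either invlpg each page or momentarily clear CR4.PGE (the approach the later
invltlb_glob() style of flush takes).  A sketch of the latter, with the
privileged control-register accessors replaced by a shadow variable so it can
actually run in userland:

#include <stdio.h>

#define CR4_PGE 0x00000080u	/* enable global pages */

/* stand-ins for the kernel's rcr4()/load_cr4(); they just model the bit */
static unsigned cr4_shadow = CR4_PGE;

static unsigned
rcr4(void)
{
	return (cr4_shadow);
}

static void
load_cr4(unsigned v)
{
	if ((cr4_shadow & CR4_PGE) && !(v & CR4_PGE))
		printf("PGE cleared: all TLB entries dropped, global ones included\n");
	cr4_shadow = v;
}

/* full flush that also kills PG_G entries: toggle CR4.PGE off and on */
static void
invltlb_glob(void)
{
	unsigned cr4 = rcr4();

	load_cr4(cr4 & ~CR4_PGE);
	load_cr4(cr4 | CR4_PGE);	/* restore; TLB now empty */
}

int
main(void)
{
	invltlb_glob();
	return (0);
}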
@@ -2775,30 +2767,19 @@
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 
-#ifdef SMP
-	/* XXX overkill, we only want to disable migration here */
-	/* XXX or maybe not. down the track we have reentrancy issues */
-	critical_enter();
-#endif
 	if (*CMAP1)
 		panic("pmap_copy_page: CMAP1 busy");
 	if (*CMAP2)
 		panic("pmap_copy_page: CMAP2 busy");
-
 	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
 	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
-	invlpg((u_int)CADDR1);
-	invlpg((u_int)CADDR2);
-#endif
-
+	invlpg((vm_offset_t)CADDR1);	/* SMP: local cpu only */
+	invlpg((vm_offset_t)CADDR2);	/* SMP: local cpu only */
 	bcopy(CADDR1, CADDR2, PAGE_SIZE);
 
 	*CMAP1 = 0;
 	*CMAP2 = 0;
-	invlpg((u_int)CADDR1);	/* SMP: local only */
-	invlpg((u_int)CADDR2);	/* SMP: local only */
-#ifdef SMP
-	critical_exit();
-#endif
+	invlpg((vm_offset_t)CADDR1);	/* SMP: local cpu only */
+	invlpg((vm_offset_t)CADDR2);	/* SMP: local cpu only */
 }
 
@@ -3228,16 +3209,11 @@
 	for (tmpva = va; size > 0; ) {
 		pte = vtopte(tmpva);
 		*pte = pa | PG_RW | PG_V | pgeflag;
-#ifdef SMP
-		cpu_invlpg((void *)tmpva);
-#else
-		invltlb_1pg(tmpva);
-#endif
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
+		pa += PAGE_SIZE;
 	}
+	pmap_invalidate_range(kernel_pmap, va, tmpva);
-
 	return ((void *)(va + offset));
 }
 
@@ -3252,11 +3228,9 @@
 	base = va & PG_FRAME;
 	offset = va & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
-	for (tmpva = base; size > 0; ) {
+	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
 		pte = vtopte(tmpva);
 		*pte = 0;
-		size -= PAGE_SIZE;
-		tmpva += PAGE_SIZE;
 	}
 	pmap_invalidate_range(kernel_pmap, va, tmpva);
 	kmem_free(kernel_map, base, size);
 
==== //depot/projects/pmap/sys/i386/i386/vm86.c#4 (text+ko) ====

@@ -574,7 +574,6 @@
 	if (intnum < 0 || intnum > 0xff)
 		return (EINVAL);
 
-printf("vm86_intcall: int 0x%x\n", intnum);
 	vmf->vmf_trapno = intnum;
 	mtx_lock(&vm86_lock);
 	retval = vm86_bioscall(vmf);
@@ -598,7 +597,6 @@
 	u_int page;
 	int i, entry, retval;
 
-printf("vm86_datacall: int 0x%x\n", intnum);
 	mtx_lock(&vm86_lock);
 	for (i = 0; i < vmc->npages; i++) {
 		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
 
==== //depot/projects/pmap/sys/i386/include/cpufunc.h#11 (text+ko) ====

@@ -628,6 +628,7 @@
 void	outsl(u_int port, void *addr, size_t cnt);
 void	outsw(u_int port, void *addr, size_t cnt);
 void	outw(u_int port, u_short data);
+void	ia32_pause(void);
 u_int	rcr0(void);
 u_int	rcr2(void);
 u_int	rcr3(void);
@@ -641,13 +642,30 @@
 void	wbinvd(void);
 void	write_eflags(u_int ef);
 void	wrmsr(u_int msr, u_int64_t newval);
-critical_t	cpu_critical_enter(void);
-void	cpu_critical_exit(critical_t eflags);
+u_int	rdr0(void);
+void	load_dr0(u_int dr0);
+u_int	rdr1(void);
+void	load_dr1(u_int dr1);
+u_int	rdr2(void);
+void	load_dr2(u_int dr2);
+u_int	rdr3(void);
+void	load_dr3(u_int dr3);
+u_int	rdr4(void);
+void	load_dr4(u_int dr4);
+u_int	rdr5(void);
+void	load_dr5(u_int dr5);
+u_int	rdr6(void);
+void	load_dr6(u_int dr6);
+u_int	rdr7(void);
+void	load_dr7(u_int dr7);
+register_t	intr_disable(void);
+void	intr_restore(register_t ef);
 
 #endif /* __GNUC__ */
 
 void	ltr(u_short sel);
 void	reset_dbregs(void);
+
 __END_DECLS
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
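
intr_disable()/intr_restore() follow the save-and-restore idiom, so nested
disable sections compose correctly.  A runnable model of that contract (the
real inlines read eflags and execute cli/popfl; here the IF state is just a
variable so the sketch runs anywhere):

#include <stdio.h>

typedef unsigned long register_t;

static register_t intr_enabled = 1;	/* stand-in for eflags.IF */

/* disable returns the previous state so the caller can restore it */
static register_t
intr_disable(void)
{
	register_t saved = intr_enabled;

	intr_enabled = 0;		/* real version: cli */
	return (saved);
}

static void
intr_restore(register_t saved)
{
	intr_enabled = saved;		/* real version: rewrite eflags */
}

int
main(void)
{
	register_t s = intr_disable();

	printf("critical work with interrupts off\n");
	intr_restore(s);
	printf("IF restored to %lu\n", intr_enabled);
	return (0);
}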
==== //depot/projects/pmap/sys/i386/include/pmap.h#10 (text+ko) ====

@@ -254,7 +254,6 @@
 extern vm_offset_t virtual_end;
 
 void	pmap_bootstrap(vm_offset_t, vm_offset_t);
-pmap_t	pmap_kernel(void);
 void	*pmap_mapdev(vm_offset_t, vm_size_t);
 void	pmap_unmapdev(vm_offset_t, vm_size_t);
 pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
 
==== //depot/projects/pmap/sys/kern/subr_witness.c#11 (text+ko) ====

@@ -223,7 +223,7 @@
 	{ "icu", &lock_class_mtx_spin },
 #ifdef SMP
 	{ "smp rendezvous", &lock_class_mtx_spin },
-#ifdef __i386__
+#if defined(__i386__) && defined(APIC_IO)
 	{ "tlb", &lock_class_mtx_spin },
 #endif
 #endif
 
==== //depot/projects/pmap/sys/vm/vm_glue.c#7 (text+ko) ====

@@ -319,10 +319,13 @@
 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
 			if (rv != VM_PAGER_OK)
 				panic("vm_proc_swapin: cannot get upage");
-			m = vm_page_lookup(upobj, i);
-			m->valid = VM_PAGE_BITS_ALL;
 		}
+	}
+	if (upobj->resident_page_count != UAREA_PAGES)
+		panic("vm_proc_swapin: lost pages from upobj");
+	TAILQ_FOREACH(m, &upobj->memq, listq) {
 		ma[i] = m;
+		m->valid = VM_PAGE_BITS_ALL;
 		vm_page_wire(m);
 		vm_page_wakeup(m);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);

To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe p4-projects" in the body of the message