Date:      Fri, 2 Mar 2001 12:01:48 +0000 (GMT)
From:      Doug Rabson <dfr@nlsystems.com>
To:        John Baldwin <jhb@FreeBSD.org>
Cc:        <ia64@FreeBSD.org>
Subject:   Re: Grrrr..
Message-ID:  <Pine.BSF.4.33.0103021157080.46579-100000@herring.nlsystems.com>
In-Reply-To: <XFMail.010227112837.jhb@FreeBSD.org>

On Tue, 27 Feb 2001, John Baldwin wrote:

> That would be ideal, yes. :)  I guess I'll read up some more on the VM stuff in
> the ia64 docco and see if I can make some sense of it.

I worked on this some more. Fixing pmap_map() removes most of the calls to
get_pv_entry() which happen before pmap_init(). There are still about 320
calls which actually happen *during* pmap_init(); these can be covered by a
static array. A kernel with this patch boots a bit further but dies later,
somewhere inside xpt_init() (I think).

Index: i386/i386/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/pmap.c,v
retrieving revision 1.273
diff -u -r1.273 pmap.c
--- i386/i386/pmap.c	2001/02/23 01:25:00	1.273
+++ i386/i386/pmap.c	2001/03/02 10:52:18
@@ -711,22 +711,30 @@
  *	Used to map a range of physical addresses into kernel
  *	virtual address space.
  *
- *	For now, VM is already on, we only need to map the
- *	specified memory.
+ *	The value passed in '*virt' is a suggested virtual address for
+ *	the mapping. Architectures which can support a direct-mapped
+ *	physical to virtual region can return the appropriate address
+ *	within that region, leaving '*virt' unchanged. Other
+ *	architectures should map the pages starting at '*virt' and
+ *	update '*virt' with the first usable address after the mapped
+ *	region.
  */
 vm_offset_t
 pmap_map(virt, start, end, prot)
-	vm_offset_t virt;
+	vm_offset_t *virt;
 	vm_offset_t start;
 	vm_offset_t end;
 	int prot;
 {
+	vm_offset_t sva = *virt;
+	vm_offset_t va = sva;
 	while (start < end) {
-		pmap_kenter(virt, start);
-		virt += PAGE_SIZE;
+		pmap_kenter(va, start);
+		va += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
-	return (virt);
+	*virt = va;
+	return (sva);
 }
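
For reference, callers of the new interface end up looking roughly like
this (just a sketch of the idiom; 'start', 'end' and 'vaddr' stand in for
whatever the caller has, and the vm_page.c hunk below is the real example):

	vm_offset_t vaddr, mapped;

	/*
	 * Map [start, end) into KVA. On i386 the pages land at 'vaddr'
	 * and 'vaddr' is advanced past them; on a direct-mapped
	 * architecture 'vaddr' is left untouched and an address inside
	 * the direct map is returned instead.
	 */
	mapped = pmap_map(&vaddr, start, end, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, end - start);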


Index: alpha/alpha/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/alpha/alpha/pmap.c,v
retrieving revision 1.51
diff -u -r1.51 pmap.c
--- alpha/alpha/pmap.c	2001/01/30 00:35:35	1.51
+++ alpha/alpha/pmap.c	2001/03/01 16:32:17
@@ -884,18 +884,18 @@
  *	Used to map a range of physical addresses into kernel
  *	virtual address space.
  *
- *	For now, VM is already on, we only need to map the
- *	specified memory.
+ *	The value passed in '*virt' is a suggested virtual address for
+ *	the mapping. Architectures which can support a direct-mapped
+ *	physical to virtual region can return the appropriate address
+ *	within that region, leaving '*virt' unchanged. Other
+ *	architectures should map the pages starting at '*virt' and
+ *	update '*virt' with the first usable address after the mapped
+ *	region.
  */
 vm_offset_t
-pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-	while (start < end) {
-		pmap_kenter(virt, start);
-		virt += PAGE_SIZE;
-		start += PAGE_SIZE;
-	}
-	return (virt);
+	return ALPHA_PHYS_TO_K0SEG(start);
 }
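
The alpha doesn't need to map anything here at all: physical memory is
always visible through the k0seg direct map, so we can hand back the k0seg
alias without consuming a single PV. If I remember alpha_cpu.h correctly,
the macro is just an OR with the segment base (sketch from memory,
double-check the constant):

	/* k0seg: direct-mapped, cached view of all physical memory. */
	#define ALPHA_K0SEG_BASE	0xfffffc0000000000UL
	#define ALPHA_PHYS_TO_K0SEG(pa)	((pa) | ALPHA_K0SEG_BASE)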


Index: ia64/ia64/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/pmap.c,v
retrieving revision 1.11
diff -u -r1.11 pmap.c
--- ia64/ia64/pmap.c	2001/01/21 22:23:10	1.11
+++ ia64/ia64/pmap.c	2001/03/02 11:54:00
@@ -220,12 +220,9 @@
 static vm_zone_t pvzone;
 static struct vm_zone pvzone_store;
 static struct vm_object pvzone_obj;
-static vm_zone_t pvbootzone;
-static struct vm_zone pvbootzone_store;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 static int pmap_pagedaemon_waken = 0;
 static struct pv_entry *pvinit;
-static struct pv_entry *pvbootinit;

 static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
 static pv_entry_t get_pv_entry __P((void));
@@ -271,7 +268,6 @@
 pmap_bootstrap()
 {
 	int i;
-	int boot_pvs;

 	/*
 	 * Setup RIDs. We use the bits above pmap_ridbits for a
@@ -319,19 +315,6 @@
 	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

 	/*
-	 * We need some PVs to cope with pmap_kenter() calls prior to
-	 * pmap_init(). This is all a bit flaky and needs to be
-	 * rethought, probably by avoiding the zone allocator
-	 * entirely.
-	 */
-  	boot_pvs = 32768;
-	pvbootzone = &pvbootzone_store;
-	pvbootinit = (struct pv_entry *)
-		pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
-	zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
-		  pvbootinit, boot_pvs);
-
-	/*
 	 * Set up proc0's PCB.
 	 */
 #if 0
@@ -752,8 +735,23 @@
 static pv_entry_t
 get_pv_entry(void)
 {
-	if (!pvinit)
-		return zalloc(pvbootzone);
+	/*
+	 * We can get called a few times really early before
+	 * pmap_init() has finished allocating the pvzone (mostly as a
+	 * result of the call to kmem_alloc() in pmap_init()). We allow
+	 * a small number of entries to be allocated statically to
+	 * cover this.
+	 */
+	if (!pvinit) {
+#define PV_BOOTSTRAP_NEEDED	512
+		static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
+		static int pvbootnext = 0;
+
+		if (pvbootnext == PV_BOOTSTRAP_NEEDED)
+			panic("get_pv_entry: called too many times"
+			      " before pmap_init is finished");
+		return &pvbootentries[pvbootnext++];
+	}

 	pv_entry_count++;
 	if (pv_entry_high_water &&
@@ -1115,22 +1113,18 @@
  *	Used to map a range of physical addresses into kernel
  *	virtual address space.
  *
- *	For now, VM is already on, we only need to map the
- *	specified memory.
+ *	The value passed in '*virt' is a suggested virtual address for
+ *	the mapping. Architectures which can support a direct-mapped
+ *	physical to virtual region can return the appropriate address
+ *	within that region, leaving '*virt' unchanged. Other
+ *	architectures should map the pages starting at '*virt' and
+ *	update '*virt' with the first usable address after the mapped
+ *	region.
  */
 vm_offset_t
-pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-	/*
-	 * XXX We should really try to use larger pagesizes here to
-	 * cut down the number of PVs used.
-	 */
-	while (start < end) {
-		pmap_kenter(virt, start);
-		virt += PAGE_SIZE;
-		start += PAGE_SIZE;
-	}
-	return (virt);
+	return IA64_PHYS_TO_RR7(start);
 }

 /*
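
The ia64 is the same story: region 7 is set up as a direct mapping of
physical memory in pmap_bootstrap() above, so pmap_map() only has to tag
the physical address with the region bits. From memory (the region number
lives in the top three bits of the 64-bit address, so double-check against
the header):

	#define IA64_RR_BASE(n)		(((u_int64_t) (n)) << 61)
	#define IA64_PHYS_TO_RR7(pa)	((pa) | IA64_RR_BASE(7))
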
Index: vm/pmap.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/pmap.h,v
retrieving revision 1.37
diff -u -r1.37 pmap.h
--- vm/pmap.h	2000/10/17 10:05:49	1.37
+++ vm/pmap.h	2001/03/01 16:18:30
@@ -110,7 +110,7 @@
 boolean_t	 pmap_ts_referenced __P((vm_page_t m));
 void		 pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
 void		 pmap_kremove __P((vm_offset_t));
-vm_offset_t	 pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+vm_offset_t	 pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
 void		 pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
 		    vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
 		    int pagelimit));
Index: vm/vm_page.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_page.c,v
retrieving revision 1.157
diff -u -r1.157 vm_page.c
--- vm/vm_page.c	2001/03/01 19:21:24	1.157
+++ vm/vm_page.c	2001/03/02 10:57:47
@@ -242,8 +242,6 @@
 	 *
 	 * Note: This computation can be tweaked if desired.
 	 */
-	vm_page_buckets = (struct vm_page **)vaddr;
-	bucket = vm_page_buckets;
 	if (vm_page_bucket_count == 0) {
 		vm_page_bucket_count = 1;
 		while (vm_page_bucket_count < atop(total))
@@ -257,12 +255,12 @@
 	 */
 	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
 	new_end = trunc_page(new_end);
-	mapped = round_page(vaddr);
-	vaddr = pmap_map(mapped, new_end, end,
+	mapped = pmap_map(&vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
-	vaddr = round_page(vaddr);
-	bzero((caddr_t) mapped, vaddr - mapped);
+	bzero((caddr_t) mapped, end - new_end);

+	vm_page_buckets = (struct vm_page **)mapped;
+	bucket = vm_page_buckets;
 	for (i = 0; i < vm_page_bucket_count; i++) {
 		*bucket = NULL;
 		bucket++;
@@ -281,20 +279,15 @@
 	    (end - new_end)) / PAGE_SIZE;

 	end = new_end;
+
 	/*
 	 * Initialize the mem entry structures now, and put them in the free
 	 * queue.
 	 */
-	vm_page_array = (vm_page_t) vaddr;
-	mapped = vaddr;
-
-	/*
-	 * Validate these addresses.
-	 */
-
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
-	mapped = pmap_map(mapped, new_end, end,
+	mapped = pmap_map(&vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
+	vm_page_array = (vm_page_t) mapped;

 	/*
 	 * Clear all of the page structures

-- 
Doug Rabson				Mail:  dfr@nlsystems.com
					Phone: +44 20 8348 6160


