Date: Thu, 1 Mar 2001 16:25:17 +0000 (GMT)
From: Doug Rabson <dfr@nlsystems.com>
To: John Baldwin <jhb@FreeBSD.org>
Cc: <ia64@FreeBSD.org>
Subject: Re: Grrrr..
Message-ID: <Pine.BSF.4.33.0103011622410.42199-100000@herring.nlsystems.com>
In-Reply-To: <XFMail.010227112837.jhb@FreeBSD.org>
On Tue, 27 Feb 2001, John Baldwin wrote:
>
> On 27-Feb-01 Doug Rabson wrote:
> > I think it would be a good idea. Basically, we need to be able to cope
> > with the calls to pmap_map() which the VM system makes during
> > initialisation. I'm not sure exactly how many there are - perhaps we can
> > map them all using translation registers, which would be nice.
>
> That would be ideal, yes. :) I guess I'll read up some more on the VM stuff in
> the ia64 docco and see if I can make some sense of it.
I remember now. I always intended to redo pmap_map() via the direct-mapped
segments. This saves kernel virtual address space (of which there is
plenty) but, more importantly, saves TLB entries and, on ia64, VHPT entries. I
just hacked up this patch, which isn't tested on anything, but I think it's
along the right lines. Beware of conflicts - Drew Gallatin is working in this
area.
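
For reference, the direct-mapped translations the patch leans on are roughly
the following (a sketch only, not part of the patch; the base constants are
the usual ones from the Alpha and ia64 machine headers, so treat the exact
values here as assumptions rather than the real definitions):

/*
 * Sketch of the direct-map translations (assumed constants; the real
 * definitions live in the machine headers).
 */
#define ALPHA_K0SEG_BASE	0xfffffc0000000000UL		/* assumed K0SEG base */
#define IA64_RR_BASE(n)		((unsigned long)(n) << 61)	/* assumed region base */

/*
 * Physical to direct-mapped virtual is just an offset: no page-table
 * entries are created, so no per-page TLB (or ia64 VHPT) entries are
 * consumed for the mapping itself.
 */
#define ALPHA_PHYS_TO_K0SEG(pa)	((pa) + ALPHA_K0SEG_BASE)
#define IA64_PHYS_TO_RR7(pa)	((pa) + IA64_RR_BASE(7))

The interface change is that callers now pass a pointer: i386 still enters
pages starting at *virt and advances it past the mapped region, while Alpha
and ia64 just return the direct-mapped address and leave *virt untouched,
which is why the vm_page.c code below uses the return value rather than
vaddr.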
Index: alpha/alpha/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/alpha/alpha/pmap.c,v
retrieving revision 1.51
diff -u -r1.51 pmap.c
--- alpha/alpha/pmap.c 2001/01/30 00:35:35 1.51
+++ alpha/alpha/pmap.c 2001/03/01 16:21:21
@@ -898,6 +898,24 @@
return (virt);
}
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ return ALPHA_PHYS_TO_K0SEG(start);
+}
+
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
Index: i386/i386/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/pmap.c,v
retrieving revision 1.272
diff -u -r1.272 pmap.c
--- i386/i386/pmap.c 2001/01/30 00:35:34 1.272
+++ i386/i386/pmap.c 2001/03/01 16:20:03
@@ -712,22 +712,30 @@
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * For now, VM is already on, we only need to map the
- * specified memory.
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
- vm_offset_t virt;
+ vm_offset_t *virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
+ vm_offset_t sva = *virt;
+ vm_offset_t va = sva;
while (start < end) {
- pmap_kenter(virt, start);
- virt += PAGE_SIZE;
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
start += PAGE_SIZE;
}
- return (virt);
+ *virt = va;
+ return (sva);
}
Index: ia64/ia64/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/pmap.c,v
retrieving revision 1.11
diff -u -r1.11 pmap.c
--- ia64/ia64/pmap.c 2001/01/21 22:23:10 1.11
+++ ia64/ia64/pmap.c 2001/03/01 16:21:13
@@ -1115,22 +1115,18 @@
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * For now, VM is already on, we only need to map the
- * specified memory.
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
*/
vm_offset_t
-pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
- /*
- * XXX We should really try to use larger pagesizes here to
- * cut down the number of PVs used.
- */
- while (start < end) {
- pmap_kenter(virt, start);
- virt += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- return (virt);
+ return IA64_PHYS_TO_RR7(start);
}
/*
Index: vm/pmap.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/pmap.h,v
retrieving revision 1.37
diff -u -r1.37 pmap.h
--- vm/pmap.h 2000/10/17 10:05:49 1.37
+++ vm/pmap.h 2001/03/01 16:18:30
@@ -110,7 +110,7 @@
boolean_t pmap_ts_referenced __P((vm_page_t m));
void pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
void pmap_kremove __P((vm_offset_t));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+vm_offset_t pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
int pagelimit));
Index: vm/vm_page.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_page.c,v
retrieving revision 1.156
diff -u -r1.156 vm_page.c
--- vm/vm_page.c 2000/12/26 19:41:38 1.156
+++ vm/vm_page.c 2001/03/01 16:17:39
@@ -242,8 +242,6 @@
*
* Note: This computation can be tweaked if desired.
*/
- vm_page_buckets = (struct vm_page **)vaddr;
- bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(total))
@@ -255,16 +253,16 @@
/*
* Validate these addresses.
*/
-
new_start = start + vm_page_bucket_count * sizeof(struct vm_page *);
new_start = round_page(new_start);
- mapped = round_page(vaddr);
- vaddr = pmap_map(mapped, start, new_start,
+ mapped = pmap_map(&vaddr, start, new_start,
VM_PROT_READ | VM_PROT_WRITE);
+ bzero((caddr_t) mapped, new_start - start);
start = new_start;
- vaddr = round_page(vaddr);
- bzero((caddr_t) mapped, vaddr - mapped);
+ vm_page_buckets = (struct vm_page **)mapped;
+ bucket = vm_page_buckets;
+
for (i = 0; i < vm_page_bucket_count; i++) {
*bucket = NULL;
bucket++;
@@ -275,7 +273,6 @@
* use (taking into account the overhead of a page structure per
* page).
*/
-
first_page = phys_avail[0] / PAGE_SIZE;
page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
@@ -286,16 +283,11 @@
* Initialize the mem entry structures now, and put them in the free
* queue.
*/
- vm_page_array = (vm_page_t) vaddr;
- mapped = vaddr;
-
- /*
- * Validate these addresses.
- */
new_start = round_page(start + page_range * sizeof(struct vm_page));
- mapped = pmap_map(mapped, start, new_start,
+ mapped = pmap_map(&vaddr, start, new_start,
VM_PROT_READ | VM_PROT_WRITE);
start = new_start;
+ vm_page_array = (vm_page_t) mapped;
first_managed_page = start / PAGE_SIZE;
--
Doug Rabson Mail: dfr@nlsystems.com
Phone: +44 20 8348 6160
