Date: Tue, 21 Aug 2001 10:36:50 -0700 (PDT)
From: Matt Dillon <dillon@earth.backplane.com>
To: Jake Burkholder <jake@k7.locore.ca>
Cc: John Baldwin <jhb@FreeBSD.ORG>, freebsd-smp@FreeBSD.ORG, freebsd-alpha@FreeBSD.ORG
Subject: Re: Preliminary proposed rollup of kernel submap initialization code
Message-ID: <200108211736.f7LHaoC64628@earth.backplane.com>
References: <200108210157.f7L1vAh10384@k7.locore.ca>
:Yay! I was just about to do almost exactly the same thing!
:
:Looks ok to me. I'll take care of the sparc64 changes.
:
:You might want to move some of the callout initialization to
:kern_timeout.c.
Ok, here is an adjusted patch. This compiles clean (for i386 anyway),
but has NOT been tested yet. Specifically, I rearranged the caddr_t v
address manipulation a bit. I'll be able to test and commit it to
-current this evening.
-Matt
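
For anyone skimming the diff, the "caddr_t v address manipulation" mentioned
above is the usual two-pass startup idiom that the patch gathers into
vm_ksubmap_init(): run the reservation helpers once against address 0 just to
measure how much space they want, kmem_alloc() that much, then run the same
helpers again against the real base so everything lands at a stable address.
A stand-alone sketch of the idiom (reserve_foo()/reserve_bar() and their sizes
are made up; they stand in for kern_timeout_callwheel_alloc() and
kern_vfs_bio_buffer_alloc(), and malloc() stands in for kmem_alloc()):

/*
 * Two-pass reservation sketch; mirrors the firstaddr/v/again: pattern
 * used by vm_ksubmap_init() in the patch below.
 */
#include <stdio.h>
#include <stdlib.h>

typedef char *caddr_t;

static caddr_t
reserve_foo(caddr_t v)
{
        return (v + 128 * sizeof(long));        /* pretend foo needs this much */
}

static caddr_t
reserve_bar(caddr_t v)
{
        return (v + 4096);                      /* pretend bar needs a page */
}

int
main(void)
{
        caddr_t firstaddr = 0;
        caddr_t v;
        size_t size = 0;

again:
        /*
         * Pass 1: firstaddr is 0, so "v" only measures the space wanted.
         * Pass 2: the identical calls run against real memory.
         */
        v = (caddr_t)firstaddr;
        v = reserve_foo(v);
        v = reserve_bar(v);

        if (firstaddr == 0) {
                size = (size_t)(v - firstaddr);
                firstaddr = malloc(size);       /* the kernel uses kmem_alloc() */
                if (firstaddr == 0)
                        abort();                /* "no room for tables" */
                goto again;
        }
        if ((size_t)(v - firstaddr) != size)
                abort();                        /* "table size inconsistency" */
        printf("reserved %zu bytes at %p\n", size, (void *)firstaddr);
        free(firstaddr);
        return (0);
}
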
Index: alpha/alpha/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/alpha/alpha/machdep.c,v
retrieving revision 1.138
diff -u -r1.138 machdep.c
--- alpha/alpha/machdep.c 2001/08/13 22:41:14 1.138
+++ alpha/alpha/machdep.c 2001/08/20 23:39:42
@@ -228,9 +228,7 @@
static void identifycpu __P((void));
-static vm_offset_t buffer_sva, buffer_eva;
-vm_offset_t clean_sva, clean_eva;
-static vm_offset_t pager_sva, pager_eva;
+struct kva_md_info kmi;
/*
* Hooked into the shutdown chain; if the system is to be halted,
@@ -248,13 +246,6 @@
cpu_startup(dummy)
void *dummy;
{
- register unsigned i;
- register caddr_t v;
- vm_offset_t maxaddr;
- vm_size_t size = 0;
- vm_offset_t firstaddr;
- vm_offset_t minaddr;
-
/*
* Good {morning,afternoon,evening,night}.
*/
@@ -281,6 +272,9 @@
}
}
+ vm_ksubmap_init(&kmi);
+
+#if 0
/*
* Calculate callout wheel size
*/
@@ -387,6 +381,7 @@
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+#endif
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
Index: alpha/alpha/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/alpha/alpha/pmap.c,v
retrieving revision 1.62
diff -u -r1.62 pmap.c
--- alpha/alpha/pmap.c 2001/07/27 01:08:59 1.62
+++ alpha/alpha/pmap.c 2001/08/20 23:23:56
@@ -774,7 +774,7 @@
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- if ((va < clean_sva) || (va >= clean_eva))
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return 1;
else
return 0;
Index: i386/i386/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/machdep.c,v
retrieving revision 1.472
diff -u -r1.472 machdep.c
--- i386/i386/machdep.c 2001/08/21 07:20:06 1.472
+++ i386/i386/machdep.c 2001/08/21 07:21:52
@@ -198,9 +198,8 @@
/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
-static vm_offset_t buffer_sva, buffer_eva;
-vm_offset_t clean_sva, clean_eva;
-static vm_offset_t pager_sva, pager_eva;
+struct kva_md_info kmi;
+
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
@@ -213,14 +212,6 @@
cpu_startup(dummy)
void *dummy;
{
- register unsigned i;
- register caddr_t v;
- vm_offset_t maxaddr;
- vm_size_t size = 0;
- int firstaddr;
- vm_offset_t minaddr;
- int physmem_est; /* in pages */
-
/*
* Good {morning,afternoon,evening,night}.
*/
@@ -250,6 +241,9 @@
}
}
+ vm_ksubmap_init(&kmi);
+
+#if 0
/*
* Calculate callout wheel size
*/
@@ -387,6 +381,7 @@
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+#endif
#if defined(USERCONFIG)
userconfig();
Index: i386/i386/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/pmap.c,v
retrieving revision 1.284
diff -u -r1.284 pmap.c
--- i386/i386/pmap.c 2001/07/27 01:08:59 1.284
+++ i386/i386/pmap.c 2001/08/20 23:10:49
@@ -546,7 +546,7 @@
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- if ((va < clean_sva) || (va >= clean_eva))
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return 1;
else
return 0;
Index: kern/kern_timeout.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_timeout.c,v
retrieving revision 1.69
diff -u -r1.69 kern_timeout.c
--- kern/kern_timeout.c 2001/08/10 21:06:59 1.69
+++ kern/kern_timeout.c 2001/08/21 17:10:20
@@ -62,6 +62,55 @@
static struct callout *nextsoftcheck; /* Next callout to be checked. */
/*
+ * kern_timeout_callwheel_alloc() - kernel low-level callwheel initialization
+ *
+ * This code is called very early in the kernel initialization sequence,
+ * and may be called more than once.
+ */
+caddr_t
+kern_timeout_callwheel_alloc(caddr_t v)
+{
+ /*
+ * Calculate callout wheel size
+ */
+ for (callwheelsize = 1, callwheelbits = 0;
+ callwheelsize < ncallout;
+ callwheelsize <<= 1, ++callwheelbits)
+ ;
+ callwheelmask = callwheelsize - 1;
+
+ callout = (struct callout *)v;
+ v = (caddr_t)(callout + ncallout);
+ callwheel = (struct callout_tailq *)v;
+ v = (caddr_t)(callwheel + callwheelsize);
+ return(v);
+}
+
+/*
+ * kern_timeout_callwheel_init() - initialize previously reserved callwheel
+ * space.
+ *
+ * This code is called just once, after the space reserved for the
+ * callout wheel has been finalized.
+ */
+void
+kern_timeout_callwheel_init(void)
+{
+ int i;
+
+ SLIST_INIT(&callfree);
+ for (i = 0; i < ncallout; i++) {
+ callout_init(&callout[i], 0);
+ callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+ SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+ }
+ for (i = 0; i < callwheelsize; i++) {
+ TAILQ_INIT(&callwheel[i]);
+ }
+ mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+}
+
+/*
* The callout mechanism is based on the work of Adam M. Costello and
* George Varghese, published in a technical report entitled "Redesigning
* the BSD Callout and Timer Facilities" and modified slightly for inclusion
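
As a worked example of the sizing loop in kern_timeout_callwheel_alloc()
above: it simply rounds ncallout up to the next power of two and records the
corresponding shift and mask. The ncallout value below is hypothetical (the
kernel computes it elsewhere), but the loop is the same:

#include <stdio.h>

int
main(void)
{
        int ncallout = 1000;    /* hypothetical; not a value set by this patch */
        int callwheelsize, callwheelbits, callwheelmask;

        /* Smallest power of two >= ncallout. */
        for (callwheelsize = 1, callwheelbits = 0;
             callwheelsize < ncallout;
             callwheelsize <<= 1, ++callwheelbits)
                ;
        callwheelmask = callwheelsize - 1;

        /* Prints: callwheelsize=1024 callwheelbits=10 callwheelmask=1023 */
        printf("callwheelsize=%d callwheelbits=%d callwheelmask=%d\n",
            callwheelsize, callwheelbits, callwheelmask);
        return (0);
}
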
Index: kern/vfs_bio.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/vfs_bio.c,v
retrieving revision 1.285
diff -u -r1.285 vfs_bio.c
--- kern/vfs_bio.c 2001/07/27 15:57:17 1.285
+++ kern/vfs_bio.c 2001/08/21 17:26:53
@@ -319,19 +319,73 @@
}
/*
- * Initialize buffer headers and related structures.
+ * Calculate buffer cache scaling values and reserve space for buffer
+ * headers. This is called during low-level kernel initialization and
+ * may be called more than once. We CANNOT write to the memory area
+ * being reserved at this time.
*/
-
caddr_t
-bufhashinit(caddr_t vaddr)
+kern_vfs_bio_buffer_alloc(caddr_t v, int physmem_est)
{
- /* first, make a null hash table */
+ /*
+ * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+ * For the first 64MB of ram nominally allocate sufficient buffers to
+ * cover 1/4 of our ram. Beyond the first 64MB allocate additional
+ * buffers to cover 1/20 of our ram over 64MB. When auto-sizing
+ * the buffer cache we limit the eventual kva reservation to
+ * maxbcache bytes.
+ *
+ * factor represents the 1/4 x ram conversion.
+ */
+ if (nbuf == 0) {
+ int factor = 4 * BKVASIZE / PAGE_SIZE;
+
+ nbuf = 50;
+ if (physmem_est > 1024)
+ nbuf += min((physmem_est - 1024) / factor,
+ 16384 / factor);
+ if (physmem_est > 16384)
+ nbuf += (physmem_est - 16384) * 2 / (factor * 5);
+
+ if (maxbcache && nbuf > maxbcache / BKVASIZE)
+ nbuf = maxbcache / BKVASIZE;
+ }
+
+ /*
+ * Do not allow the buffer_map to be more than 1/2 the size of the
+ * kernel_map.
+ */
+ if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
+ (BKVASIZE * 2)) {
+ nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
+ (BKVASIZE * 2);
+ printf("Warning: nbufs capped at %d\n", nbuf);
+ }
+
+ /*
+ * swbufs are used as temporary holders for I/O, such as paging I/O.
+ * We have no fewer than 16 and no more than 256.
+ */
+ nswbuf = max(min(nbuf/4, 256), 16);
+
+ /*
+ * Reserve space for the buffer cache buffers
+ */
+ swbuf = (void *)v;
+ v = (caddr_t)(swbuf + nswbuf);
+ buf = (void *)v;
+ v = (caddr_t)(buf + nbuf);
+
+ /*
+ * Calculate the hash table size and reserve space
+ */
for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
;
- bufhashtbl = (void *)vaddr;
- vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask;
+ bufhashtbl = (void *)v;
+ v = (caddr_t)(bufhashtbl + bufhashmask);
--bufhashmask;
- return(vaddr);
+
+ return(v);
}
void
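
To make the auto-sizing above concrete, the snippet below reruns the
kern_vfs_bio_buffer_alloc() formula for a hypothetical 128MB machine. The
PAGE_SIZE and BKVASIZE values are assumed typical i386 settings rather than
anything this patch defines, the maxbcache and buffer_map caps are left out,
and the kernel's min()/max() are written out by hand:

#include <stdio.h>

#define PAGE_SIZE       4096            /* assumed i386 page size */
#define BKVASIZE        16384           /* assumed nominal buffer KVA size */

int
main(void)
{
        int physmem_est = 32768;        /* hypothetical 128MB of RAM, in pages */
        int factor = 4 * BKVASIZE / PAGE_SIZE;
        int nbuf = 50;
        int nswbuf, tmp;

        if (physmem_est > 1024) {
                tmp = (physmem_est - 1024) / factor;
                nbuf += tmp < 16384 / factor ? tmp : 16384 / factor;
        }
        if (physmem_est > 16384)
                nbuf += (physmem_est - 16384) * 2 / (factor * 5);

        nswbuf = nbuf / 4 < 256 ? nbuf / 4 : 256;
        if (nswbuf < 16)
                nswbuf = 16;

        /* Prints: factor=16 nbuf=1483 nswbuf=256 */
        printf("factor=%d nbuf=%d nswbuf=%d\n", factor, nbuf, nswbuf);
        return (0);
}
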
Index: sys/buf.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/buf.h,v
retrieving revision 1.119
diff -u -r1.119 buf.h
--- sys/buf.h 2001/08/20 00:41:12 1.119
+++ sys/buf.h 2001/08/21 17:25:30
@@ -513,7 +513,7 @@
struct uio;
-caddr_t bufhashinit __P((caddr_t));
+caddr_t kern_vfs_bio_buffer_alloc __P((caddr_t v, int physmem_est));
void bufinit __P((void));
void bwillwrite __P((void));
int buf_dirty_count_severe __P((void));
Index: sys/systm.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/systm.h,v
retrieving revision 1.148
diff -u -r1.148 systm.h
--- sys/systm.h 2001/08/10 06:37:04 1.148
+++ sys/systm.h 2001/08/21 17:25:20
@@ -208,6 +208,8 @@
void callout_handle_init __P((struct callout_handle *));
struct callout_handle timeout __P((timeout_t *, void *, int));
void untimeout __P((timeout_t *, void *, struct callout_handle));
+caddr_t kern_timeout_callwheel_alloc __P((caddr_t v));
+void kern_timeout_callwheel_init __P((void));
/* Stubs for obsolete functions that used to be for interrupt management */
static __inline void spl0(void) { return; }
Index: vm/vm.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm.h,v
retrieving revision 1.18
diff -u -r1.18 vm.h
--- vm/vm.h 2001/07/04 16:20:27 1.18
+++ vm/vm.h 2001/08/20 23:35:44
@@ -113,4 +113,21 @@
typedef struct vm_page *vm_page_t;
#endif
+/*
+ * Information passed from the machine-independent VM initialization code
+ * for use by machine-dependent code (mainly for MMU support)
+ */
+struct kva_md_info {
+ vm_offset_t buffer_sva;
+ vm_offset_t buffer_eva;
+ vm_offset_t clean_sva;
+ vm_offset_t clean_eva;
+ vm_offset_t pager_sva;
+ vm_offset_t pager_eva;
+};
+
+extern struct kva_md_info kmi;
+extern void vm_ksubmap_init(struct kva_md_info *kmi);
+
#endif /* VM_H */
+
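
Since the kva_md_info structure is the only channel the MI code now uses to
hand the submap boundaries back to machine-dependent code, a consumer just
reads the fields once vm_ksubmap_init() has filled them in, exactly as the
pmap_track_modified() hunks earlier do. A hypothetical MD helper (not part of
this patch) might look like:

/*
 * Hypothetical consumer of the kva_md_info handoff; this mirrors the
 * pmap_track_modified() change earlier in the patch and is not itself
 * part of the patch.
 */
#include <sys/param.h>
#include <vm/vm.h>

static __inline int
va_in_clean_submap(vm_offset_t va)
{
        /* kmi is filled in by vm_ksubmap_init() during cpu_startup(). */
        return (va >= kmi.clean_sva && va < kmi.clean_eva);
}
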
Index: vm/vm_init.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_init.c,v
retrieving revision 1.28
diff -u -r1.28 vm_init.c
--- vm/vm_init.c 2001/07/04 16:20:27 1.28
+++ vm/vm_init.c 2001/08/21 17:33:48
@@ -74,8 +74,12 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
@@ -119,3 +123,88 @@
pmap_init(avail_start, avail_end);
vm_pager_init();
}
+
+void
+vm_ksubmap_init(struct kva_md_info *kmi)
+{
+ vm_offset_t firstaddr;
+ caddr_t v;
+ vm_size_t size = 0;
+ int physmem_est;
+ vm_offset_t minaddr;
+ vm_offset_t maxaddr;
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+ v = kern_timeout_callwheel_alloc(v);
+
+ /*
+ * Discount the physical memory larger than the size of kernel_map
+ * to avoid eating up all of KVA space.
+ */
+ if (kernel_map->first_free == NULL) {
+ printf("Warning: no free entries in kernel_map.\n");
+ physmem_est = physmem;
+ } else {
+ physmem_est = min(physmem, btoc(kernel_map->max_offset -
+ kernel_map->min_offset));
+ }
+
+ v = kern_vfs_bio_buffer_alloc(v, physmem_est);
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)((char *)v - firstaddr);
+ firstaddr = kmem_alloc(kernel_map, round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)((char *)v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
+ (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+ buffer_map = kmem_suballoc(clean_map, &kmi->buffer_sva,
+ &kmi->buffer_eva, (nbuf*BKVASIZE));
+ buffer_map->system_map = 1;
+ pager_map = kmem_suballoc(clean_map, &kmi->pager_sva, &kmi->pager_eva,
+ (nswbuf*MAXPHYS) + pager_map_size);
+ pager_map->system_map = 1;
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+ /*
+ * XXX: Mbuf system machine-specific initializations should
+ * go here, if anywhere.
+ */
+
+ /*
+ * Initialize the callouts we just allocated.
+ */
+ kern_timeout_callwheel_init();
+}
+
