
Anonymous committed c092540

eliminate enter_alloc -- use kalloc for everything

  • Parent commit: 7e7cb10

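The change replaces the special boot-time bump allocator (enter_alloc) with a two-phase setup of the ordinary page allocator: kinit1() seeds the free list with the pages already mapped by entrypgdir, without locking, while only the boot processor is running; kinit2() adds the rest of physical memory and turns locking on once the other processors have been started. The sketch below is a condensed, self-contained illustration of that pattern, not the xv6 code itself; the pool sizes and the names blkalloc, blkfree, and union block are made up for the example.

    /* Illustrative sketch only -- mimics the two-phase free-list scheme
     * this commit introduces; it is not the xv6 code. */
    #include <pthread.h>
    #include <stdio.h>

    #define BLKSIZE 4096

    struct run { struct run *next; };

    /* One BLKSIZE "page", aligned well enough to hold a struct run. */
    union block { struct run r; char bytes[BLKSIZE]; };

    static struct {
      pthread_mutex_t lock;
      int use_lock;              /* 0 while only the boot CPU is running */
      struct run *freelist;
    } mem = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

    static void blkfree(void *v)
    {
      struct run *r = v;
      if (mem.use_lock) pthread_mutex_lock(&mem.lock);
      r->next = mem.freelist;
      mem.freelist = r;
      if (mem.use_lock) pthread_mutex_unlock(&mem.lock);
    }

    static void *blkalloc(void)
    {
      if (mem.use_lock) pthread_mutex_lock(&mem.lock);
      struct run *r = mem.freelist;
      if (r) mem.freelist = r->next;
      if (mem.use_lock) pthread_mutex_unlock(&mem.lock);
      return r;
    }

    static void freerange(void *vstart, void *vend)
    {
      char *p = vstart;
      for (; p + BLKSIZE <= (char *)vend; p += BLKSIZE)
        blkfree(p);
    }

    int main(void)
    {
      static union block early[4];  /* stands in for the entrypgdir-mapped 4 MB */
      static union block rest[12];  /* stands in for the rest of physical memory */

      /* Phase 1 (cf. kinit1): one CPU, no other threads yet, no locking needed. */
      freerange(early, early + 4);

      /* ... this is where the other CPUs/threads would be started ... */

      /* Phase 2 (cf. kinit2): hand over the rest, then turn locking on. */
      freerange(rest, rest + 12);
      mem.use_lock = 1;

      printf("first page: %p\n", blkalloc());
      return 0;
    }

In the real code the blocks are 4096-byte physical pages, the lock is a spinlock, and the two ranges correspond to the memory below and above the 4 MB that entrypgdir maps, as the diff below shows.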

Files changed (4)

defs.h

 void            ioapicinit(void);
 
 // kalloc.c
-char*           enter_alloc(void);
 char*           kalloc(void);
 void            kfree(char*);
-void            kinit(void);
-uint            detect_memory(void);
+void            kinit1(void*, void*);
+void            kinit2(void*, void*);
 
 // kbd.c
 void            kbdintr(void);
 void            seginit(void);
 void            kvmalloc(void);
 void            vmenable(void);
-pde_t*          setupkvm(char* (*alloc)());
+pde_t*          setupkvm();
 char*           uva2ka(pde_t*, char*);
 int             allocuvm(pde_t*, uint, uint);
 int             deallocuvm(pde_t*, uint, uint);

kalloc.c

 #include "mmu.h"
 #include "spinlock.h"
 
+void freerange(void *vstart, void *vend);
+extern char end[]; // first address after kernel loaded from ELF file
+
 struct run {
   struct run *next;
 };
 
 struct {
   struct spinlock lock;
+  int use_lock;
   struct run *freelist;
 } kmem;
 
-extern char end[]; // first address after kernel loaded from ELF file
-static char *newend;
-
-// A simple page allocator to get off the ground during entry
-char *
-enter_alloc(void)
+// Initialization happens in two phases.
+// 1. main() calls kinit1() while still using entrypgdir to place just
+// the pages mapped by entrypgdir on free list.
+// 2. main() calls kinit2() with the rest of the physical pages
+// after installing a full page table that maps them on all cores.
+void
+kinit1(void *vstart, void *vend)
 {
-  if (newend == 0)
-    newend = end;
+  initlock(&kmem.lock, "kmem");
+  kmem.use_lock = 0;
+  freerange(vstart, vend);
+}
 
-  if ((uint) newend >= KERNBASE + 0x400000)
-    panic("only first 4Mbyte are mapped during entry");
-  void *p = (void*)PGROUNDUP((uint)newend);
-  memset(p, 0, PGSIZE);
-  newend = newend + PGSIZE;
-  return p;
+void
+kinit2(void *vstart, void *vend)
+{
+  freerange(vstart, vend);
+  kmem.use_lock = 1;
 }
 
-// Initialize free list of physical pages.
 void
-kinit(void)
+freerange(void *vstart, void *vend)
 {
   char *p;
-
-  initlock(&kmem.lock, "kmem");
-  p = (char*)PGROUNDUP((uint)newend);
-  for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE)
+  p = (char*)PGROUNDUP((uint)vstart);
+  for(; p + PGSIZE <= (char*)vend; p += PGSIZE)
     kfree(p);
 }
 
   // Fill with junk to catch dangling refs.
   memset(v, 1, PGSIZE);
 
-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = (struct run*)v;
   r->next = kmem.freelist;
   kmem.freelist = r;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
 }
 
 // Allocate one 4096-byte page of physical memory.
 {
   struct run *r;
 
-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = kmem.freelist;
   if(r)
     kmem.freelist = r->next;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
   return (char*)r;
 }
 
main.c

 static void startothers(void);
 static void mpmain(void)  __attribute__((noreturn));
 extern pde_t *kpgdir;
+extern char end[]; // first address after kernel loaded from ELF file
 
 // Bootstrap processor starts running C code here.
 // Allocate a real stack and switch to it, first
 int
 main(void)
 {
+  kinit1(end, P2V(4*1024*1024)); // phys page allocator
   kvmalloc();      // kernel page table
   mpinit();        // collect info about this machine
   lapicinit(mpbcpu());
   ideinit();       // disk
   if(!ismp)
     timerinit();   // uniprocessor timer
-  startothers();    // start other processors (must come before kinit)
-  kinit();         // initialize memory allocator
-  userinit();      // first user process  (must come after kinit)
+  startothers();   // start other processors
+  kinit2(P2V(4*1024*1024), P2V(PHYSTOP)); // must come after startothers()
+  userinit();      // first user process
   // Finish setting up this processor in mpmain.
   mpmain();
 }
     // Tell entryother.S what stack to use, where to enter, and what 
     // pgdir to use. We cannot use kpgdir yet, because the AP processor
     // is running in low  memory, so we use entrypgdir for the APs too.
-    // kalloc can return addresses above 4Mbyte (the machine may have 
-    // much more physical memory than 4Mbyte), which aren't mapped by
-    // entrypgdir, so we must allocate a stack using enter_alloc(); 
-    // this introduces the constraint that xv6 cannot use kalloc until 
-    // after these last enter_alloc invocations.
-    stack = enter_alloc();
+    stack = kalloc();
     *(void**)(code-4) = stack + KSTACKSIZE;
     *(void**)(code-8) = mpenter;
     *(int**)(code-12) = (void *) v2p(entrypgdir);
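
The ordering in main() above carries the constraint that the deleted comment used to spell out. A rough picture, assuming the stock xv6 layout (KERNBASE = 0x80000000, PHYSTOP = 0xE000000, and entrypgdir mapping only the first 4 MB of physical memory):

    /* kinit1(end, P2V(4*1024*1024))
     *   frees virtual [end, KERNBASE+4MB), i.e. physical [V2P(end), 4MB).
     *   Every page on the free list is therefore mapped by entrypgdir, so the
     *   AP stacks that startothers() obtains from kalloc() are usable by the
     *   APs, which still run on entrypgdir.
     *
     * kinit2(P2V(4*1024*1024), P2V(PHYSTOP))
     *   frees virtual [KERNBASE+4MB, KERNBASE+PHYSTOP), i.e. physical
     *   [4MB, PHYSTOP).  It must run after startothers(), because these pages
     *   are not mapped by entrypgdir; it also sets use_lock, since from now on
     *   several CPUs may call kalloc()/kfree() concurrently.
     */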

vm.c

 // that corresponds to virtual address va.  If alloc!=0,
 // create any required page table pages.
 static pte_t *
-walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
+walkpgdir(pde_t *pgdir, const void *va, int alloc)
 {
   pde_t *pde;
   pte_t *pgtab;
   if(*pde & PTE_P){
     pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
   } else {
-    if(!alloc || (pgtab = (pte_t*)alloc()) == 0)
+    if(!alloc || (pgtab = (pte_t*)kalloc()) == 0)
       return 0;
     // Make sure all those PTE_P bits are zero.
     memset(pgtab, 0, PGSIZE);
 // physical addresses starting at pa. va and size might not
 // be page-aligned.
 static int
-mappages(pde_t *pgdir, void *va, uint size, uint pa,
-         int perm, char* (*alloc)(void))
+mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm)
 {
   char *a, *last;
   pte_t *pte;
   a = (char*)PGROUNDDOWN((uint)va);
   last = (char*)PGROUNDDOWN(((uint)va) + size - 1);
   for(;;){
-    if((pte = walkpgdir(pgdir, a, alloc)) == 0)
+    if((pte = walkpgdir(pgdir, a, 1)) == 0)
       return -1;
     if(*pte & PTE_P)
       panic("remap");
 
 // Set up kernel part of a page table.
 pde_t*
-setupkvm(char* (*alloc)(void))
+setupkvm()
 {
   pde_t *pgdir;
   struct kmap *k;
 
-  if((pgdir = (pde_t*)alloc()) == 0)
+  if((pgdir = (pde_t*)kalloc()) == 0)
     return 0;
   memset(pgdir, 0, PGSIZE);
   if (p2v(PHYSTOP) > (void*)DEVSPACE)
     panic("PHYSTOP too high");
   for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
     if(mappages(pgdir, k->virt, k->phys_end - k->phys_start, 
-                (uint)k->phys_start, k->perm, alloc) < 0)
+                (uint)k->phys_start, k->perm) < 0)
       return 0;
   return pgdir;
 }
 void
 kvmalloc(void)
 {
-  kpgdir = setupkvm(enter_alloc);
+  kpgdir = setupkvm();
   switchkvm();
 }
 
     panic("inituvm: more than a page");
   mem = kalloc();
   memset(mem, 0, PGSIZE);
-  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
   memmove(mem, init, sz);
 }
 
       return 0;
     }
     memset(mem, 0, PGSIZE);
-    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U);
   }
   return newsz;
 }
   uint pa, i;
   char *mem;
 
-  if((d = setupkvm(kalloc)) == 0)
+  if((d = setupkvm()) == 0)
     return 0;
   for(i = 0; i < sz; i += PGSIZE){
     if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
     if((mem = kalloc()) == 0)
       goto bad;
     memmove(mem, (char*)p2v(pa), PGSIZE);
-    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0)
+    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0)
       goto bad;
   }
   return d;