ref: 5e010fd8a82ec3b6552b4002bcd4be0586081e9a
dir: /sys/src/9/pc/main.c/
#include "u.h" #include "tos.h" #include "../port/lib.h" #include "mem.h" #include "dat.h" #include "fns.h" #include "io.h" #include "../port/pci.h" #include "ureg.h" #include "pool.h" #include "rebootcode.i" Mach *m; Conf conf; int delaylink; int idle_spin; extern void (*i8237alloc)(void); extern void bootscreeninit(void); extern void multibootdebug(void); void main(void) { mach0init(); bootargsinit(); trapinit0(); ioinit(); i8250console(); quotefmtinstall(); screeninit(); print("\nPlan 9\n"); cpuidentify(); meminit0(); archinit(); if(arch->clockinit) arch->clockinit(); meminit(); ramdiskinit(); confinit(); xinit(); trapinit(); mathinit(); if(i8237alloc != nil) i8237alloc(); pcicfginit(); bootscreeninit(); printinit(); cpuidprint(); mmuinit(); if(arch->intrinit) /* launches other processors on an mp */ arch->intrinit(); timersinit(); if(arch->clockenable) arch->clockenable(); procinit0(); initseg(); if(delaylink){ bootlinks(); }else links(); chandevreset(); pageinit(); userinit(); schedinit(); } void mach0init(void) { conf.nmach = 1; MACHP(0) = (Mach*)CPU0MACH; m->pdb = (ulong*)CPU0PDB; m->gdt = (Segdesc*)CPU0GDT; machinit(); active.machs[0] = 1; active.exiting = 0; } void machinit(void) { int machno; ulong *pdb; Segdesc *gdt; machno = m->machno; pdb = m->pdb; gdt = m->gdt; memset(m, 0, sizeof(Mach)); m->machno = machno; m->pdb = pdb; m->gdt = gdt; m->perf.period = 1; /* * For polled uart output at boot, need * a default delay constant. 100000 should * be enough for a while. Cpuidentify will * calculate the real value later. */ m->loopconst = 100000; } void init0(void) { char buf[2*KNAMELEN], **sp; chandevinit(); if(!waserror()){ snprint(buf, sizeof(buf), "%s %s", arch->id, conffile); ksetenv("terminal", buf, 0); ksetenv("cputype", "386", 0); if(cpuserver) ksetenv("service", "cpu", 0); else ksetenv("service", "terminal", 0); setconfenv(); poperror(); } kproc("alarm", alarmkproc, 0); sp = (char**)(USTKTOP - sizeof(Tos) - 8 - sizeof(sp[0])*4); sp[3] = sp[2] = nil; strcpy(sp[1] = (char*)&sp[4], "boot"); sp[0] = nil; touser(sp); } void confinit(void) { char *p; int i, userpcnt; ulong kpages; if(p = getconf("service")){ if(strcmp(p, "cpu") == 0) cpuserver = 1; else if(strcmp(p,"terminal") == 0) cpuserver = 0; } if(p = getconf("*kernelpercent")) userpcnt = 100 - strtol(p, 0, 0); else userpcnt = 0; conf.npage = 0; for(i=0; i<nelem(conf.mem); i++) conf.npage += conf.mem[i].npage; conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5; if(cpuserver) conf.nproc *= 3; if(conf.nproc > 4000) conf.nproc = 4000; conf.nimage = 200; conf.nswap = conf.nproc*80; conf.nswppo = 4096; if(cpuserver) { if(userpcnt < 10) userpcnt = 70; kpages = conf.npage - (conf.npage*userpcnt)/100; conf.nimage = conf.nproc; /* * Hack for the big boys. Only good while physmem < 4GB. * Give the kernel fixed max + enough to allocate the * page pool. * This is an overestimate as conf.upages < conf.npages. * The patch of nimage is a band-aid, scanning the whole * page list in imagereclaim just takes too long. */ if(getconf("*imagemaxmb") == 0) if(kpages > (64*MB + conf.npage*sizeof(Page))/BY2PG){ kpages = (64*MB + conf.npage*sizeof(Page))/BY2PG; kpages += (conf.nproc*KSTACK)/BY2PG; } } else { if(userpcnt < 10) { if(conf.npage*BY2PG < 16*MB) userpcnt = 50; else userpcnt = 60; } kpages = conf.npage - (conf.npage*userpcnt)/100; /* * Make sure terminals with low memory get at least * 4MB on the first Image chunk allocation. */ if(conf.npage*BY2PG < 16*MB) imagmem->minarena = 4*MB; } /* * can't go past the end of virtual memory * (ulong)-KZERO is 2^32 - KZERO */ if(kpages > ((ulong)-KZERO)/BY2PG) kpages = ((ulong)-KZERO)/BY2PG; conf.upages = conf.npage - kpages; conf.ialloc = (kpages/2)*BY2PG; /* * Guess how much is taken by the large permanent * datastructures. Mntcache and Mntrpc are not accounted for. */ kpages *= BY2PG; kpages -= conf.upages*sizeof(Page) + conf.nproc*sizeof(Proc*) + conf.nimage*sizeof(Image) + conf.nswap + conf.nswppo*sizeof(Page*); mainmem->maxsize = kpages; /* * the dynamic allocation will balance the load properly, * hopefully. be careful with 32-bit overflow. */ imagmem->maxsize = kpages - (kpages/10); if(p = getconf("*imagemaxmb")){ imagmem->maxsize = strtol(p, nil, 0)*MB; if(imagmem->maxsize > mainmem->maxsize) imagmem->maxsize = mainmem->maxsize; } } void procsetup(Proc *p) { fpuprocsetup(p); memset(p->gdt, 0, sizeof(p->gdt)); p->nldt = 0; /* clear debug registers */ memset(p->dr, 0, sizeof(p->dr)); if(m->dr7 != 0){ m->dr7 = 0; putdr7(0); } } void procfork(Proc *p) { /* inherit user descriptors */ memmove(p->gdt, up->gdt, sizeof(p->gdt)); /* copy local descriptor table */ if(up->ldt != nil && up->nldt > 0){ p->ldt = smalloc(sizeof(Segdesc) * up->nldt); memmove(p->ldt, up->ldt, sizeof(Segdesc) * up->nldt); p->nldt = up->nldt; } fpuprocfork(p); } void procrestore(Proc *p) { if(p->dr[7] != 0){ m->dr7 = p->dr[7]; putdr(p->dr); } if(p->vmx != nil) vmxprocrestore(p); fpuprocrestore(p); } /* * Save the mach dependent part of the process state. */ void procsave(Proc *p) { /* we could just always putdr7(0) but accessing DR7 might be slow in a VM */ if(m->dr7 != 0){ m->dr7 = 0; putdr7(0); } if(p->state == Moribund) p->dr[7] = 0; fpuprocsave(p); /* * While this processor is in the scheduler, the process could run * on another processor and exit, returning the page tables to * the free list where they could be reallocated and overwritten. * When this processor eventually has to get an entry from the * trashed page tables it will crash. * * If there's only one processor, this can't happen. * You might think it would be a win not to do this in that case, * especially on VMware, but it turns out not to matter. */ mmuflushtlb(PADDR(m->pdb)); } static void rebootjump(uintptr entry, uintptr code, ulong size) { void (*f)(uintptr, uintptr, ulong); ulong *pdb; arch->introff(); /* * Modify the machine page table to directly map the low 4MB of memory * This allows the reboot code to turn off the page mapping */ pdb = m->pdb; pdb[PDX(0)] = pdb[PDX(KZERO)]; mmuflushtlb(PADDR(pdb)); /* setup reboot trampoline function */ f = (void*)REBOOTADDR; memmove(f, rebootcode, sizeof(rebootcode)); /* off we go - never to return */ coherence(); (*f)(entry, code, size); for(;;); } void exit(int) { cpushutdown(); splhi(); if(m->machno) rebootjump(0, 0, 0); /* clear secrets */ zeroprivatepages(); poolreset(secrmem); arch->reset(); } void reboot(void *entry, void *code, ulong size) { writeconf(); vmxshutdown(); /* * the boot processor is cpu0. execute this function on it * so that the new kernel has the same cpu0. this only matters * because the hardware has a notion of which processor was the * boot processor and we look at it at start up. */ if(m->machno){ procwired(up, 0); sched(); } cpushutdown(); delay(1000); splhi(); /* turn off buffered serial console */ serialoq = nil; /* shutdown devices */ chandevshutdown(); /* clear secrets */ zeroprivatepages(); poolreset(secrmem); /* disable pci devices */ pcireset(); rebootjump((ulong)entry & ~0xF0000000UL, PADDR(code), size); }