ref: 177cbace733eeceaef54e2c1a4032c55d4e100dd
parent: 9fd3e3f239162a192e0136034187ed674106e58b
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Sat Mar 28 01:15:40 EDT 2015
sgi: work in progress kernel for sgi mips machines (only tested with r5k indy) this provides basic console support using the ARC bios routines through the uartarcs driver, and has a native seeq ethernet driver which was written by reading the 2ed devseq driver as i have no documentation on the hardware. mmu and trap code is based on the routerboard kernel.
--- /dev/null
+++ b/sys/src/9/sgi/bootfs.proto
@@ -1,0 +1,37 @@
+$objtype
+ bin
+ awk
+ auth
+ secstore
+ aux
+ kbdfs
+ bind
+ cat
+ dd
+ echo
+ grep
+ ip
+ ipconfig
+ ls
+ mkdir
+ mntgen
+ mount
+ mv
+ ndb
+ dnsgetip
+ ps
+ rc
+ rm
+ sed
+ sleep
+ srv
+ test
+ unmount
+ xd
+rc
+ lib
+ rcmain
+ net.rc 555 sys sys ../boot/net.rc
+ bin
+ bootrc 555 sys sys ../boot/bootrc
+tmp
--- /dev/null
+++ b/sys/src/9/sgi/clock.c
@@ -1,0 +1,167 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "io.h"
+
+#include "ureg.h"
+
+enum {
+ Cyccntres = 2, /* counter advances at ½ clock rate */
+ Basetickfreq = 150*Mhz / Cyccntres, /* sgi/indy */
+
+ Instrs = 10*Mhz,
+};
+
+static long
+issue1loop(void)
+{
+ register int i;
+ long st;
+
+ i = Instrs;
+ st = perfticks();
+ do {
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i; --i; --i; --i; --i; --i;
+ --i; --i; --i; --i; --i;
+ /* omit 3 (--i) to account for conditional branch, nop & jump */
+ i -= 1+3; /* --i plus 3 omitted (--i) instructions */
+ } while(--i >= 0);
+ return perfticks() - st;
+}
+
+/* estimate instructions/s. */
+static int
+guessmips(long (*loop)(void), char *)
+{
+ int s;
+ long cyc;
+
+ do {
+ s = splhi();
+ cyc = loop();
+ splx(s);
+ if (cyc < 0)
+ iprint("again...");
+ } while (cyc < 0);
+ /*
+ * Instrs instructions took cyc cycles @ Basetickfreq Hz.
+ * round the result.
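+	 * e.g. (illustrative, not measured): 10M instructions in 5M counter
+	 * ticks at Basetickfreq 75Mhz come out as
+	 * (75e6*10e6/5e6 + 5e5)/1e6 = 150 mips.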
+ */
+ return (((vlong)Basetickfreq * Instrs) / cyc + Mhz/2) / Mhz;
+}
+
+void
+clockinit(void)
+{
+ int mips;
+
+ /*
+ * calibrate fastclock
+ */
+ mips = guessmips(issue1loop, "single");
+
+ /*
+ * m->delayloop should be the number of delay loop iterations
+ * needed to consume 1 ms, assuming 2 instr'ns in the delay loop.
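+	 * (e.g. a 150 mips cpu gets 150e6/2000 = 75000 iterations per ms.)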
+ */
+ m->delayloop = mips*Mhz / (1000 * 2);
+ if(m->delayloop == 0)
+ m->delayloop = 1;
+
+ m->speed = mips;
+ m->hz = m->speed*Mhz;
+
+ m->maxperiod = Basetickfreq / HZ;
+ m->minperiod = Basetickfreq / (100*HZ);
+ wrcompare(rdcount()+m->maxperiod);
+
+ intron(INTR7);
+}
+
+void
+clock(Ureg *ur)
+{
+ wrcompare(rdcount()+m->maxperiod); /* side-effect: dismiss intr */
+ timerintr(ur, 0);
+}
+
+void
+microdelay(int n)
+{
+ ulong now;
+ now = µs();
+ while(µs() - now < n);
+}
+
+void
+delay(int n)
+{
+ while(--n >= 0)
+ microdelay(1000);
+}
+
+ulong
+µs(void)
+{
+ return fastticks2us(fastticks(nil));
+}
+
+uvlong
+fastticks(uvlong *hz)
+{
+ int x;
+ ulong delta, count;
+
+ if(hz)
+ *hz = Basetickfreq;
+
+ /* avoid reentry on interrupt or trap, to prevent recursion */
+ x = splhi();
+ count = rdcount();
+ if(rdcompare() - count > m->maxperiod)
+ wrcompare(count+m->maxperiod);
+ if (count < m->lastcount) /* wrapped around? */
+ delta = count + ((1ull<<32) - m->lastcount);
+ else
+ delta = count - m->lastcount;
+ m->lastcount = count;
+ m->fastticks += delta;
+ splx(x);
+
+ return m->fastticks;
+}
+
+ulong
+perfticks(void)
+{
+ return rdcount();
+}
+
+void
+timerset(Tval next)
+{
+ int x;
+ long period;
+
+ if(next == 0)
+ return;
+ x = splhi(); /* don't let us get scheduled */
+ period = next - fastticks(nil);
+ if(period > m->maxperiod - m->minperiod)
+ period = m->maxperiod;
+ else if(period < m->minperiod)
+ period = m->minperiod;
+ wrcompare(rdcount()+period);
+ splx(x);
+
+}
--- /dev/null
+++ b/sys/src/9/sgi/dat.h
@@ -1,0 +1,211 @@
+typedef struct Conf Conf;
+typedef struct Confmem Confmem;
+typedef struct FPsave FPsave;
+typedef struct KMap KMap;
+typedef struct Lance Lance;
+typedef struct Lancemem Lancemem;
+typedef struct Label Label;
+typedef struct Lock Lock;
+typedef struct Mach Mach;
+typedef struct MMU MMU;
+typedef struct Notsave Notsave;
+typedef struct PMMU PMMU;
+typedef struct Softtlb Softtlb;
+typedef struct Ureg Ureg;
+typedef struct Proc Proc;
+typedef uvlong Tval;
+
+#define MAXSYSARG 5 /* for mount(fd, afd, mpt, flag, arg) */
+
+/*
+ * parameters for sysproc.c and rebootcmd.c
+ */
+#define AOUT_MAGIC V_MAGIC || magic==M_MAGIC
+/* r3k or r4k boot images */
+#define BOOT_MAGIC (0x160<<16) || magic == ((0x160<<16)|3)
+
+/*
+ * machine dependent definitions used by ../port/dat.h
+ */
+
+struct Lock
+{
+ ulong key; /* semaphore (non-zero = locked) */
+ ulong sr;
+ ulong pc;
+ Proc *p;
+ Mach *m;
+ ushort isilock;
+};
+
+struct Label
+{
+ uintptr sp;
+ uintptr pc;
+};
+
+struct Confmem
+{
+ ulong base;
+ ulong npage;
+ ulong kbase;
+ ulong klimit;
+};
+
+struct Conf
+{
+ ulong nmach; /* processors */
+ ulong nproc; /* processes */
+ Confmem mem[4];
+ ulong npage; /* total physical pages of memory */
+ ulong upages; /* user page pool */
+ ulong nimage; /* number of page cache image headers */
+ ulong nswap; /* number of swap pages */
+ int nswppo; /* max # of pageouts per segment pass */
+ ulong copymode; /* 0 is copy on write, 1 is copy on reference */
+ ulong ialloc; /* bytes available for interrupt-time allocation */
+ ulong pipeqsize; /* size in bytes of pipe queues */
+ int nuart; /* number of uart devices */
+};
+
+/*
+ * floating point registers
+ */
+enum
+{
+ /* floating point state */
+ FPinit,
+ FPactive,
+ FPinactive,
+ FPemu,
+
+ /* bit meaning floating point illegal */
+ FPillegal= 0x100,
+};
+
+enum {
+ Nfpregs = 32, /* floats; half as many doubles */
+};
+
+/*
+ * emulated floating point (mips32r2 with ieee fp regs)
+ * fpstate is separate, kept in Proc
+ */
+struct FPsave
+{
+ /* /dev/proc expects the registers to be first in FPsave */
+ ulong reg[Nfpregs]; /* the canonical bits */
+ union {
+ ulong fpstatus; /* both are fcr31 */
+ ulong fpcontrol;
+ };
+
+ int fpdelayexec; /* executing delay slot of branch */
+ uintptr fpdelaypc; /* pc to resume at after */
+ ulong fpdelaysts; /* save across user-mode delay-slot execution */
+
+ /* stuck-fault detection */
+ uintptr fppc; /* addr of last fault */
+ int fpcnt; /* how many consecutive at that addr */
+};
+
+/*
+ * mmu goo in the Proc structure
+ */
+struct PMMU
+{
+ int pidonmach[MAXMACH];
+};
+
+/*
+ * things saved in the Proc structure during a notify
+ */
+struct Notsave
+{
+ ulong nonempty;
+};
+
+#include "../port/portdat.h"
+
+struct Mach
+{
+ /* the following are all known by l.s and cannot be moved */
+ int machno; /* physical id of processor */
+ Softtlb*stb;
+ Proc* proc; /* process on this processor */
+ ulong splpc; /* pc that called splhi() */
+ ulong tlbfault;
+
+ /* the following is safe to move */
+ ulong tlbpurge;
+ ulong ticks; /* of the clock since boot time */
+ Label sched; /* scheduler wakeup */
+ void* alarm; /* alarms bound to this clock */
+ int lastpid; /* last pid allocated on this machine */
+ Proc* pidproc[NTLBPID]; /* proc that owns tlbpid on this mach */
+ KMap* kactive; /* active on this machine */
+ int knext;
+ uchar ktlbx[NTLB]; /* tlb index used for kmap */
+ uchar ktlbnext;
+ int speed; /* cpu speed */
+ ulong delayloop; /* for the delay() routine */
+ ulong fairness; /* for runproc */
+ int flushmmu;
+ int inclockintr;
+ int ilockdepth;
+ Perf perf; /* performance counters */
+ uvlong cyclefreq; /* Frequency of user readable cycle counter */
+
+ /* for per-processor timers */
+ ulong lastcount;
+ uvlong fastticks;
+ ulong hz;
+ ulong maxperiod;
+ ulong minperiod;
+
+ Proc* readied; /* for runproc */
+ ulong schedticks; /* next forced context switch */
+
+ int pfault;
+ int cs;
+ int syscall;
+ int load;
+ int intr;
+ int hashcoll; /* soft-tlb hash collisions */
+ int paststartup; /* for putktlb */
+
+ int stack[1];
+};
+
+struct KMap
+{
+ Ref;
+ ulong virt;
+ ulong phys0;
+ ulong phys1;
+ KMap* next;
+ KMap* konmach[MAXMACH];
+ Page* pg;
+ ulong pc; /* of caller to kmap() */
+};
+
+#define VA(k) ((k)->virt)
+#define PPN(x)	((ulong)(x)>>6)	/* physical address -> tlb entrylo pfn field */
+
+struct Softtlb
+{
+ ulong virt;
+ ulong phys0;
+ ulong phys1;
+};
+
+struct
+{
+ Lock;
+ long machs; /* bitmap of processors */
+ short exiting;
+ int ispanic;
+}active;
+
+extern register Mach *m;
+extern register Proc *up;
--- /dev/null
+++ b/sys/src/9/sgi/devether.c
@@ -1,0 +1,511 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "io.h"
+#include "pool.h"
+#include "ureg.h"
+#include "../port/error.h"
+#include "../port/netif.h"
+
+#include "etherif.h"
+
+static Ether *etherxx[MaxEther];
+
+Chan*
+etherattach(char* spec)
+{
+ ulong ctlrno;
+ char *p;
+ Chan *chan;
+
+ ctlrno = 0;
+ if(spec && *spec){
+ ctlrno = strtoul(spec, &p, 0);
+ if((ctlrno == 0 && p == spec) || *p || (ctlrno >= MaxEther))
+ error(Ebadarg);
+ }
+ if(etherxx[ctlrno] == 0)
+ error(Enodev);
+
+ chan = devattach('l', spec);
+ if(waserror()){
+ chanfree(chan);
+ nexterror();
+ }
+ chan->dev = ctlrno;
+ if(etherxx[ctlrno]->attach)
+ etherxx[ctlrno]->attach(etherxx[ctlrno]);
+ poperror();
+ return chan;
+}
+
+static Walkqid*
+etherwalk(Chan* chan, Chan* nchan, char** name, int nname)
+{
+ return netifwalk(etherxx[chan->dev], chan, nchan, name, nname);
+}
+
+static int
+etherstat(Chan* chan, uchar* dp, int n)
+{
+ return netifstat(etherxx[chan->dev], chan, dp, n);
+}
+
+static Chan*
+etheropen(Chan* chan, int omode)
+{
+ return netifopen(etherxx[chan->dev], chan, omode);
+}
+
+static Chan*
+ethercreate(Chan*, char*, int, ulong)
+{
+ error(Eperm);
+ return 0;
+}
+
+static void
+etherclose(Chan* chan)
+{
+ netifclose(etherxx[chan->dev], chan);
+}
+
+static long
+etherread(Chan* chan, void* buf, long n, vlong off)
+{
+ Ether *ether;
+ ulong offset = off;
+
+ ether = etherxx[chan->dev];
+ if((chan->qid.type & QTDIR) == 0 && ether->ifstat){
+ /*
+ * With some controllers it is necessary to reach
+ * into the chip to extract statistics.
+ */
+ if(NETTYPE(chan->qid.path) == Nifstatqid)
+ return ether->ifstat(ether, buf, n, offset);
+ else if(NETTYPE(chan->qid.path) == Nstatqid)
+ ether->ifstat(ether, buf, 0, offset);
+ }
+
+ return netifread(ether, chan, buf, n, offset);
+}
+
+static Block*
+etherbread(Chan* chan, long n, ulong offset)
+{
+ return netifbread(etherxx[chan->dev], chan, n, offset);
+}
+
+static int
+etherwstat(Chan* chan, uchar* dp, int n)
+{
+ return netifwstat(etherxx[chan->dev], chan, dp, n);
+}
+
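+/*
+ * hand a snapshot of the packet to a headers-only reader: up to 58
+ * bytes of the frame, then a 2-byte length and a 4-byte millisecond
+ * timestamp, packed into a 64-byte block.
+ */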
+static void
+etherrtrace(Netfile* f, Etherpkt* pkt, int len)
+{
+ int i, n;
+ Block *bp;
+
+ if(qwindow(f->in) <= 0)
+ return;
+ if(len > 58)
+ n = 58;
+ else
+ n = len;
+ bp = iallocb(64);
+ if(bp == nil)
+ return;
+ memmove(bp->wp, pkt->d, n);
+ i = TK2MS(MACHP(0)->ticks);
+ bp->wp[58] = len>>8;
+ bp->wp[59] = len;
+ bp->wp[60] = i>>24;
+ bp->wp[61] = i>>16;
+ bp->wp[62] = i>>8;
+ bp->wp[63] = i;
+ bp->wp += 64;
+ qpass(f->in, bp);
+}
+
+Block*
+etheriq(Ether* ether, Block* bp, int fromwire)
+{
+ Etherpkt *pkt;
+ ushort type;
+ int len, multi, tome, fromme;
+ Netfile **ep, *f, **fp, *fx;
+ Block *xbp;
+
+ ether->inpackets++;
+
+ pkt = (Etherpkt*)bp->rp;
+ len = BLEN(bp);
+ type = (pkt->type[0]<<8)|pkt->type[1];
+ fx = 0;
+ ep = ðer->f[Ntypes];
+
+ multi = pkt->d[0] & 1;
+ /* check for valid multicast addresses */
+ if(multi && memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) != 0 && ether->prom == 0){
+ if(!activemulti(ether, pkt->d, sizeof(pkt->d))){
+ if(fromwire){
+ freeb(bp);
+ bp = 0;
+ }
+ return bp;
+ }
+ }
+
+ /* is it for me? */
+ tome = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
+ fromme = memcmp(pkt->s, ether->ea, sizeof(pkt->s)) == 0;
+
+ /*
+ * Multiplex the packet to all the connections which want it.
+ * If the packet is not to be used subsequently (fromwire != 0),
+ * attempt to simply pass it into one of the connections, thereby
+ * saving a copy of the data (usual case hopefully).
+ */
+ for(fp = ether->f; fp < ep; fp++){
+ if(f = *fp)
+ if(f->type == type || f->type < 0)
+ if(tome || multi || f->prom){
+ /* Don't want to hear bridged packets */
+ if(f->bridge && !fromwire && !fromme)
+ continue;
+ if(!f->headersonly){
+ if(fromwire && fx == 0)
+ fx = f;
+ else if(xbp = iallocb(len)){
+ memmove(xbp->wp, pkt, len);
+ xbp->wp += len;
+ if(qpass(f->in, xbp) < 0) {
+ // print("soverflow for f->in\n");
+ ether->soverflows++;
+ }
+ }
+ else {
+ // print("soverflow iallocb\n");
+ ether->soverflows++;
+ }
+ }
+ else
+ etherrtrace(f, pkt, len);
+ }
+ }
+
+ if(fx){
+ if(qpass(fx->in, bp) < 0) {
+ // print("soverflow for fx->in\n");
+ ether->soverflows++;
+ }
+ return 0;
+ }
+ if(fromwire){
+ freeb(bp);
+ return 0;
+ }
+
+ return bp;
+}
+
+static int
+etheroq(Ether* ether, Block* bp)
+{
+ int len, loopback;
+ Etherpkt *pkt;
+
+ ether->outpackets++;
+
+ /*
+ * Check if the packet has to be placed back onto the input queue,
+ * i.e. if it's a loopback or broadcast packet or the interface is
+ * in promiscuous mode.
+ * If it's a loopback packet indicate to etheriq that the data isn't
+ * needed and return, etheriq will pass-on or free the block.
+ * To enable bridging to work, only packets that were originated
+ * by this interface are fed back.
+ */
+ pkt = (Etherpkt*)bp->rp;
+ len = BLEN(bp);
+ loopback = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
+ if(loopback || memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) == 0 || ether->prom)
+ if(etheriq(ether, bp, loopback) == 0)
+ return len;
+
+ qbwrite(ether->oq, bp);
+ if(ether->transmit != nil)
+ ether->transmit(ether);
+ return len;
+}
+
+static long
+etherwrite(Chan* chan, void* buf, long n, vlong)
+{
+ Ether *ether;
+ Block *bp;
+ int nn, onoff;
+ Cmdbuf *cb;
+
+ ether = etherxx[chan->dev];
+ if(NETTYPE(chan->qid.path) != Ndataqid) {
+ nn = netifwrite(ether, chan, buf, n);
+ if(nn >= 0)
+ return nn;
+ cb = parsecmd(buf, n);
+ if(cb->f[0] && strcmp(cb->f[0], "nonblocking") == 0){
+ if(cb->nf <= 1)
+ onoff = 1;
+ else
+ onoff = atoi(cb->f[1]);
+ qnoblock(ether->oq, onoff);
+ free(cb);
+ return n;
+ }
+ free(cb);
+ if(ether->ctl!=nil)
+ return ether->ctl(ether,buf,n);
+
+ error(Ebadctl);
+ }
+
+ if(n > ether->maxmtu)
+ error(Etoobig);
+ if(n < ether->minmtu)
+ error(Etoosmall);
+
+ bp = allocb(n);
+ if(waserror()){
+ freeb(bp);
+ nexterror();
+ }
+ memmove(bp->rp, buf, n);
+ memmove(bp->rp+Eaddrlen, ether->ea, Eaddrlen);
+ poperror();
+ bp->wp += n;
+
+ return etheroq(ether, bp);
+}
+
+static long
+etherbwrite(Chan* chan, Block* bp, ulong)
+{
+ Ether *ether;
+ long n;
+
+ n = BLEN(bp);
+ if(NETTYPE(chan->qid.path) != Ndataqid){
+ if(waserror()) {
+ freeb(bp);
+ nexterror();
+ }
+ n = etherwrite(chan, bp->rp, n, 0);
+ poperror();
+ freeb(bp);
+ return n;
+ }
+ ether = etherxx[chan->dev];
+
+ if(n > ether->maxmtu){
+ freeb(bp);
+ error(Etoobig);
+ }
+ if(n < ether->minmtu){
+ freeb(bp);
+ error(Etoosmall);
+ }
+
+ return etheroq(ether, bp);
+}
+
+static struct {
+ char* type;
+ int (*reset)(Ether*);
+} cards[MaxEther+1];
+
+void
+addethercard(char* t, int (*r)(Ether*))
+{
+ static int ncard;
+
+ if(ncard == MaxEther)
+ panic("too many ether cards");
+ cards[ncard].type = t;
+ cards[ncard].reset = r;
+ ncard++;
+}
+
+int
+parseether(uchar *to, char *from)
+{
+ char nip[4];
+ char *p;
+ int i;
+
+ p = from;
+ for(i = 0; i < Eaddrlen; i++){
+ if(*p == 0)
+ return -1;
+ nip[0] = *p++;
+ if(*p == 0)
+ return -1;
+ nip[1] = *p++;
+ nip[2] = 0;
+ to[i] = strtoul(nip, 0, 16);
+ if(*p == ':')
+ p++;
+ }
+ return 0;
+}
+
+static Ether*
+etherprobe(int cardno, int ctlrno)
+{
+ int i, lg;
+ ulong mb, bsz;
+ Ether *ether;
+ char buf[128], name[32];
+
+ ether = malloc(sizeof(Ether));
+ if(ether == nil){
+ print("etherprobe: no memory for Ether\n");
+ return nil;
+ }
+ memset(ether, 0, sizeof(Ether));
+ ether->ctlrno = ctlrno;
+ ether->mbps = 10;
+ ether->minmtu = ETHERMINTU;
+ ether->maxmtu = ETHERMAXTU;
+
+ if(cardno >= MaxEther || cards[cardno].type == nil){
+ free(ether);
+ return nil;
+ }
+ if(cards[cardno].reset(ether) < 0){
+ free(ether);
+ return nil;
+ }
+
+ snprint(name, sizeof(name), "ether%d", ctlrno);
+
+ intrenable(ether->irqlevel, ether->interrupt, ether);
+
+ i = sprint(buf, "#l%d: %s: %dMbps port 0x%luX irq %d",
+ ctlrno, cards[cardno].type, ether->mbps, ether->port, ether->irq);
+ i += sprint(buf+i, ": %2.2ux%2.2ux%2.2ux%2.2ux%2.2ux%2.2ux",
+ ether->ea[0], ether->ea[1], ether->ea[2],
+ ether->ea[3], ether->ea[4], ether->ea[5]);
+ sprint(buf+i, "\n");
+ print(buf);
+
+ /* compute log10(ether->mbps) into lg */
+ for(lg = 0, mb = ether->mbps; mb >= 10; lg++)
+ mb /= 10;
+ if (lg > 0)
+ lg--;
+ if (lg > 14) /* 2^(14+17) = 2³¹ */
+ lg = 14;
+ /* allocate larger output queues for higher-speed interfaces */
+ bsz = 1UL << (lg + 17); /* 2¹⁷ = 128K, bsz = 2ⁿ × 128K */
+ while (bsz > mainmem->maxsize / 8 && bsz > 128*1024)
+ bsz /= 2;
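+	/* e.g. 10Mbps -> 128K, 100Mbps -> 256K, 1Gbps -> 512K, before the clamp above */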
+
+ netifinit(ether, name, Ntypes, bsz);
+ if(ether->oq == nil) {
+ ether->oq = qopen(bsz, Qmsg, 0, 0);
+ ether->limit = bsz;
+ }
+ if(ether->oq == nil)
+ panic("etherreset %s: can't allocate output queue of %ld bytes", name, bsz);
+ ether->alen = Eaddrlen;
+ memmove(ether->addr, ether->ea, Eaddrlen);
+ memset(ether->bcast, 0xFF, Eaddrlen);
+
+ return ether;
+}
+
+static void
+etherreset(void)
+{
+ Ether *ether;
+ int cardno, ctlrno;
+
+ cardno = ctlrno = 0;
+ while(cards[cardno].type != nil && ctlrno < MaxEther){
+ if(etherxx[ctlrno] != nil){
+ ctlrno++;
+ continue;
+ }
+ if((ether = etherprobe(cardno, ctlrno)) == nil){
+ cardno++;
+ continue;
+ }
+ etherxx[ctlrno] = ether;
+ ctlrno++;
+ }
+}
+
+static void
+ethershutdown(void)
+{
+ Ether *ether;
+ int i;
+
+ for(i = 0; i < MaxEther; i++){
+ ether = etherxx[i];
+ if(ether == nil)
+ continue;
+ if(ether->shutdown == nil) {
+ print("#l%d: no shutdown function\n", i);
+ continue;
+ }
+ (*ether->shutdown)(ether);
+ }
+}
+
+
+#define POLY 0xedb88320
+
+/* really slow 32 bit crc for ethers */
+ulong
+ethercrc(uchar *p, int len)
+{
+ int i, j;
+ ulong crc, b;
+
+ crc = 0xffffffff;
+ for(i = 0; i < len; i++){
+ b = *p++;
+ for(j = 0; j < 8; j++){
+ crc = (crc>>1) ^ (((crc^b) & 1) ? POLY : 0);
+ b >>= 1;
+ }
+ }
+ return crc;
+}
+
+Dev etherdevtab = {
+ 'l',
+ "ether",
+
+ etherreset,
+ devinit,
+ ethershutdown,
+ etherattach,
+ etherwalk,
+ etherstat,
+ etheropen,
+ ethercreate,
+ etherclose,
+ etherread,
+ etherbread,
+ etherwrite,
+ etherbwrite,
+ devremove,
+ etherwstat,
+};
--- /dev/null
+++ b/sys/src/9/sgi/etherif.h
@@ -1,0 +1,40 @@
+enum {
+ MaxEther = 1,
+ Ntypes = 8,
+};
+
+typedef struct Ether Ether;
+struct Ether {
+
+ int ctlrno;
+ int minmtu;
+ int maxmtu;
+ uchar ea[Eaddrlen];
+
+ int irq, irqlevel;
+ uintptr port;
+
+ void (*attach)(Ether*); /* filled in by reset routine */
+ void (*detach)(Ether*);
+ void (*transmit)(Ether*);
+ void (*interrupt)(Ureg*, void*);
+ long (*ifstat)(Ether*, void*, long, ulong);
+ long (*ctl)(Ether*, void*, long); /* custom ctl messages */
+ void (*power)(Ether*, int); /* power on/off */
+ void (*shutdown)(Ether*); /* shutdown hardware before reboot */
+ void *ctlr;
+
+ Queue* oq;
+
+ Netif;
+};
+
+extern Block* etheriq(Ether*, Block*, int);
+extern void addethercard(char*, int(*)(Ether*));
+extern ulong ethercrc(uchar*, int);
+extern int parseether(uchar*, char*);
+
+#define NEXT(x, l) (((x)+1)%(l))
+#define PREV(x, l) (((x) == 0) ? (l)-1: (x)-1)
+#define HOWMANY(x, y) (((x)+((y)-1))/(y))
+#define ROUNDUP(x, y) (HOWMANY((x), (y))*(y))
--- /dev/null
+++ b/sys/src/9/sgi/etherseeq.c
@@ -1,0 +1,415 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "io.h"
+#include "../port/netif.h"
+#include "etherif.h"
+
+typedef struct Hio Hio;
+typedef struct Desc Desc;
+typedef struct Ring Ring;
+typedef struct Ctlr Ctlr;
+
+/*
+ * SEEQ 8003 interfaced to HPC3 (very different from IP20)
+ */
+struct Hio
+{
+ ulong unused0[20480];
+ ulong crbp; /* current receive buf desc ptr */
+ ulong nrbdp; /* next receive buf desc ptr */
+ ulong unused1[1022];
+ ulong rbc; /* receive byte count */
+ ulong rstat; /* receiver status */
+ ulong rgio; /* receive gio fifo ptr */
+ ulong rdev; /* receive device fifo ptr */
+ ulong unused2;
+ ulong ctl; /* interrupt, channel reset, buf oflow */
+ ulong dmacfg; /* dma configuration */
+ ulong piocfg; /* pio configuration */
+ ulong unused3[1016];
+ ulong cxbdp; /* current xmit buf desc ptr */
+ ulong nxbdp; /* next xmit buffer desc. pointer */
+ ulong unused4[1022];
+ ulong xbc; /* xmit byte count */
+ ulong xstat;
+ ulong xgio; /* xmit gio fifo ptr */
+ ulong xdev; /* xmit device fifo ptr */
+ ulong unused5[1020];
+ ulong crbdp; /* current receive descriptor ptr */
+ ulong unused6[2047];
+ ulong cpfxbdp; /* current/previous packet 1st xmit */
+ ulong ppfxbdp; /* desc ptr */
+ ulong unused7[59390];
+ ulong eaddr[6]; /* seeq station address wo */
+ ulong csr; /* seeq receiver cmd/status reg */
+ ulong csx; /* seeq transmitter cmd/status reg */
+};
+
+enum
+{ /* ctl */
+ Cover= 0x08, /* receive buffer overflow */
+ Cnormal=0x00, /* 1=normal, 0=loopback */
+ Cint= 0x02, /* interrupt (write 1 to clear) */
+ Creset= 0x01, /* ethernet channel reset */
+
+ /* xstat */
+ Xdma= 0x200, /* dma active */
+ Xold= 0x080, /* register has been read */
+ Xok= 0x008, /* transmission was successful */
+ Xmaxtry=0x004, /* transmission failed after 16 attempts */
+ Xcoll= 0x002, /* transmission collided */
+ Xunder= 0x001, /* transmitter underflowed */
+
+ /* csx */
+ Xreg0= 0x00, /* access reg bank 0 incl station addr */
+ XIok= 0x08,
+ XImaxtry=0x04,
+ XIcoll= 0x02,
+ XIunder=0x01,
+
+ /* rstat */
+ Rlshort=0x800, /* [small len in received frame] */
+ Rdma= 0x200, /* dma active */
+ Rold= 0x80, /* register has been read */
+ Rok= 0x20, /* received good frame */
+ Rend= 0x10, /* received end of frame */
+ Rshort= 0x08, /* received short frame */
+ Rdrbl= 0x04, /* dribble error */
+ Rcrc= 0x02, /* CRC error */
+ Rover= 0x01, /* overflow error */
+
+ /* csr */
+ Rsmb= 0xc0, /* receive station/broadcast/multicast frames */
+ Rsb= 0x80, /* receive station/broadcast frames */
+ Rprom= 0x40, /* receive all frames */
+ RIok= 0x20, /* interrupt on good frame */
+ RIend= 0x10, /* interrupt on end of frame */
+ RIshort=0x08, /* interrupt on short frame */
+ RIdrbl= 0x04, /* interrupt on dribble error */
+ RIcrc= 0x02, /* interrupt on CRC error */
+ RIover= 0x01, /* interrupt on overflow error */
+
+ HPC_MODNORM= 0x0, /* mode: 0=normal, 1=loopback */
+ HPC_FIX_INTR= 0x8000, /* start timeout counter after */
+ HPC_FIX_EOP= 0x4000, /* rcv_eop_intr/eop_in_chip is set */
+ HPC_FIX_RXDC= 0x2000, /* clear eop status upon rxdc */
+};
+
+struct Desc
+{
+ ulong addr; /* addr */
+ ulong count; /* eox / eop / busy / xie / count:13 */
+ ulong next;
+ uchar* base;
+};
+
+struct Ring
+{
+ Rendez;
+ int size;
+ uchar* base;
+ Desc* head;
+ Desc* tail;
+};
+
+enum
+{
+ Eor= 1<<31, /* end of ring */
+ Eop= 1<<30,
+ Ie= 1<<29,
+ Busy= 1<<24,
+ Empty= 1<<14, /* no data here */
+};
+
+enum {
+ Rbsize = ETHERMAXTU+3,
+};
+
+struct Ctlr
+{
+ int attach;
+
+ Hio *io;
+
+ Ring rx;
+ Ring tx;
+};
+
+static ulong dummy;
+
+static void
+interrupt(Ureg *, void *arg)
+{
+ Ether *edev;
+ Ctlr *ctlr;
+ Hio *io;
+ uint s;
+
+ edev = arg;
+ ctlr = edev->ctlr;
+ io = ctlr->io;
+ s = io->ctl;
+ if(s & Cover)
+ io->ctl = Cnormal | Cover;
+ if(s & Cint) {
+ io->ctl = Cnormal | Cint;
+ wakeup(&ctlr->rx);
+ }
+}
+
+static int
+notempty(void *arg)
+{
+ Ctlr *ctlr = arg;
+ Hio *io;
+
+ io = ctlr->io;
+ dummy = io->piocfg;
+ if((io->rstat & Rdma) == 0)
+ return 1;
+ return (IO(Desc, ctlr->rx.head->next)->count & Empty) == 0;
+}
+
+static void
+rxproc(void *arg)
+{
+ Ether *edev = arg;
+ Ctlr *ctlr;
+ Hio *io;
+ Block *b;
+ Desc *p;
+ int n;
+
+ while(waserror())
+ ;
+
+ ctlr = edev->ctlr;
+ io = ctlr->io;
+ for(p = IO(Desc, ctlr->rx.head->next);; p = IO(Desc, p->next)){
+ while((p->count & Empty) != 0){
+ io->rstat = Rdma;
+ tsleep(&ctlr->rx, notempty, ctlr, 500);
+ }
+ n = Rbsize - (p->count & 0x3fff)-3;
+ if(n >= ETHERMINTU){
+ if((p->base[n+2] & Rok) != 0){
+ b = allocb(n);
+ b->wp += n;
+ memmove(b->rp, p->base+2, n);
+ etheriq(edev, b, 1);
+ }
+ }
+ p->addr = PADDR(p->base);
+ p->count = Ie|Empty|Rbsize;
+ ctlr->rx.head = p;
+ }
+}
+
+static void
+txproc(void *arg)
+{
+ Ether *edev = arg;
+ Ctlr *ctlr;
+ Hio *io;
+ Block *b;
+ Desc *p;
+ int clean, n;
+
+ while(waserror())
+ ;
+
+ ctlr = edev->ctlr;
+ io = ctlr->io;
+ clean = ctlr->tx.size / 2;
+ for(p = IO(Desc, ctlr->tx.tail->next); (b = qbread(edev->oq, 1000000)) != nil; p = IO(Desc, p->next)){
+ while(!clean){
+ splhi();
+ p = ctlr->tx.head;
+ dummy = io->piocfg;
+ ctlr->tx.head = IO(Desc, io->nxbdp & ~0xf);
+ spllo();
+ while(p != ctlr->tx.head){
+ if((p->count & Busy) == 0)
+ break;
+ clean++;
+ p->count = Eor|Eop;
+ p = IO(Desc, p->next);
+ }
+
+ p = IO(Desc, ctlr->tx.tail->next);
+ if(clean)
+ break;
+
+ io->xstat = Xdma;
+ tsleep(&ctlr->tx, return0, nil, 10);
+ }
+ clean--;
+
+ n = BLEN(b);
+ if(n > ETHERMAXTU)
+ n = ETHERMAXTU;
+ memmove(p->base, b->rp, n);
+
+ p->addr = PADDR(p->base);
+ p->count = Eor|Eop|Busy|n;
+
+ ctlr->tx.tail->count &= ~Eor;
+ ctlr->tx.tail = p;
+
+ io->xstat = Xdma;
+
+ freeb(b);
+ }
+}
+
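+/*
+ * allocate a ring of n descriptors, each with a half-page buffer.
+ * both are referenced through uncached (KSEG1) addresses and the
+ * descriptors are chained into a circle by physical address, which is
+ * the form the hpc3 buffer descriptor pointer registers expect.
+ */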
+static void
+allocring(Ring *r, int n)
+{
+ uchar *b;
+ Desc *p;
+ int m;
+
+ r->size = n;
+
+ m = n*BY2PG/2;
+ b = xspanalloc(m, BY2PG, 0);
+ dcflush(b, m);
+ b = IO(uchar, b);
+ memset(b, 0, m);
+ r->base = b;
+
+ m = n*sizeof(Desc);
+ p = xspanalloc(m, BY2PG, 0);
+ dcflush(p, m);
+ p = IO(Desc, p);
+ memset(p, 0, m);
+ r->head = r->tail = p;
+
+ for(m=0; m<n; m++, p++, b += (BY2PG/2)){
+ p->base = b;
+ p->next = PADDR(p+1);
+ }
+ p[-1].next = PADDR(r->head);
+}
+
+static int
+init(Ether *edev)
+{
+ Ctlr *ctlr;
+ Desc *p;
+ Hio *io;
+ int i;
+
+ io = IO(Hio, edev->port);
+ ctlr = edev->ctlr;
+ ctlr->io = io;
+
+ io->csx = Xreg0;
+ allocring(&ctlr->rx, 256);
+ allocring(&ctlr->tx, 64);
+
+ io->rstat = 0;
+ io->xstat = 0;
+ io->ctl = Cnormal | Creset | Cint;
+ delay(10);
+ io->ctl = Cnormal;
+ io->csx = 0;
+ io->csr = 0;
+
+ io->dmacfg |= HPC_FIX_INTR | HPC_FIX_EOP | HPC_FIX_RXDC;
+
+ p = ctlr->rx.head;
+ do {
+ p->addr = PADDR(p->base);
+ p->count = Ie|Empty|Rbsize;
+ p = IO(Desc, p->next);
+ } while(p != ctlr->rx.head);
+ io->crbdp = PADDR(p);
+ io->nrbdp = p->next;
+
+ p = ctlr->tx.tail;
+ do {
+ p->addr = 0;
+ p->count = Eor|Eop;
+ p = IO(Desc, p->next);
+ } while(p != ctlr->tx.tail);
+ ctlr->tx.head = IO(Desc, p->next);
+ io->cxbdp = PADDR(p);
+ io->nxbdp = p->next;
+
+ for(i=0; i<6; i++)
+ io->eaddr[i] = edev->ea[i];
+
+ io->csx = 0; /* XIok | XImaxtry | XIcoll | XIunder; -- no interrupts needed */
+ io->csr = Rprom | RIok|RIend|RIshort|RIdrbl|RIcrc;
+
+ return 0;
+}
+
+/*
+ * do nothing for promiscuous() and multicast() as we
+ * are always in promisc mode.
+ */
+static void
+promiscuous(void*, int)
+{
+}
+static void
+multicast(void*, uchar*, int)
+{
+}
+
+static void
+attach(Ether *edev)
+{
+ Ctlr *ctlr;
+
+ ctlr = edev->ctlr;
+ if(ctlr->attach)
+ return;
+ ctlr->attach = 1;
+ kproc("#0rx", rxproc, edev);
+ kproc("#0tx", txproc, edev);
+}
+
+static int
+pnp(Ether *edev)
+{
+ static Ctlr ct;
+ char *s;
+
+ /* only one controller */
+ if(edev->ctlrno != 0)
+ return -1;
+
+ /* get mac address from nvram */
+ if((s = getconf("eaddr")) != nil)
+ parseether(edev->ea, s);
+
+ edev->ctlr = &ct;
+ edev->port = HPC3_ETHER;
+ edev->irq = IRQENET;
+ edev->irqlevel = hpc3irqlevel(edev->irq);
+ edev->promiscuous = promiscuous;
+ edev->multicast = multicast;
+ edev->interrupt = interrupt;
+ edev->attach = attach;
+ edev->arg = edev;
+ edev->mbps = 10;
+ edev->link = 1;
+ if(init(edev) < 0){
+ edev->ctlr = nil;
+ return -1;
+ }
+ return 0;
+}
+
+void
+etherseeqlink(void)
+{
+ addethercard("seeq", pnp);
+}
--- /dev/null
+++ b/sys/src/9/sgi/faultmips.c
@@ -1,0 +1,248 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "ureg.h"
+#include "../port/error.h"
+#include "io.h"
+
+enum {
+ Debug = 0,
+};
+
+typedef struct Fault Fault;
+struct Fault {
+ uintptr va;
+ ulong pid;
+ uintptr pc;
+ int cnt;
+ char *prog;
+ int code;
+};
+
+extern char *excname[];
+
+static Fault lflt, maxflt;
+
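+/*
+ * return the address of saved register regno in the Ureg:
+ * r1-r28 sit contiguously starting at ur->r1, while sp (r29),
+ * r30 and r31 have fields of their own.
+ */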
+ulong*
+reg(Ureg *ur, int regno)
+{
+ ulong *l;
+
+ switch(regno) {
+ case 31: return &ur->r31;
+ case 30: return &ur->r30;
+ case 29: return &ur->sp;
+ default:
+ l = &ur->r1;
+ return &l[regno-1];
+ }
+}
+
+/*
+ * Ask if the instruction at EPC could have caused this badvaddr
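+ * by decoding it as a load/store: the effective address is the base
+ * register plus the sign-extended 16-bit offset, recomputed here and
+ * compared against BADVADDR.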
+ */
+int
+tstbadvaddr(Ureg *ur)
+{
+ int rn;
+ ulong iw, off, ea;
+
+ iw = ur->pc;
+ if(ur->cause & BD)
+ iw += 4;
+
+ if(seg(up, iw, 0) == 0)
+ return 0;
+
+ iw = *(ulong*)iw;
+
+/* print("iw: %#lux\n", iw); /**/
+
+ switch((iw>>26) & 0x3f) {
+ default:
+ return 1;
+ case 0x20: /* LB */
+ case 0x24: /* LBU */
+ /* LD */
+ case 0x35:
+ case 0x36:
+ case 0x37: /* LDCz */
+ case 0x1A: /* LDL */
+ case 0x1B: /* LDR */
+ case 0x21: /* LH */
+ case 0x25: /* LHU */
+ case 0x30: /* LL */
+ case 0x34: /* LLD */
+ case 0x23: /* LW */
+ case 0x31:
+ case 0x32: /* LWCz possible 0x33 */
+ case 0x27: /* LWU */
+ case 0x22: /* LWL */
+ case 0x26: /* LWR */
+ break;
+
+ case 0x28: /* SB */
+ case 0x38: /* SC */
+ case 0x3C: /* SCD */
+ case 0x3D:
+ case 0x3E:
+ case 0x3F: /* SDCz */
+ case 0x2C: /* SDL */
+ case 0x2D: /* SDR */
+ case 0x29: /* SH */
+ case 0x2B: /* SW */
+ case 0x39:
+ case 0x3A: /* SWCz */
+ case 0x2A: /* SWL */
+ case 0x2E: /* SWR */
+ break;
+ }
+
+ off = iw & 0xffff;
+ if(off & 0x8000)
+ off |= ~0xffff;
+
+ rn = (iw>>21) & 0x1f;
+ ea = *reg(ur, rn);
+ if(rn == 0)
+ ea = 0;
+ ea += off;
+
+ /* print("ea %#lux %#lux(R%d) bv %#lux pc %#lux\n", ea, off, rn, ur->badvaddr, ur->pc); /**/
+
+ if(ur->badvaddr == ea)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * we think we get consecutive page faults from unlucky combinations of
+ * scheduling and stlb hashes, and they only happen with 16K pages.
+ * however, we also get page faults while servicing the exact same fault.
+ * more than 5 consecutive faults is unusual, now that we have a better
+ * hash function.
+ *
+ * this can be helpful during mmu and cache debugging.
+ */
+static int
+ckfaultstuck(Ureg *ur, int read, int code)
+{
+ uintptr pc, va;
+
+ va = ur->badvaddr;
+ pc = ur->pc;
+ if (va != lflt.va || up->pid != lflt.pid || pc != lflt.pc ||
+ code != lflt.code) {
+ /* at least one address or cause is different from last time */
+ lflt.cnt = 1;
+ lflt.va = va;
+ lflt.pid = up->pid;
+ lflt.pc = pc;
+ lflt.code = code;
+ return 0;
+ }
+ ++lflt.cnt;
+ if (lflt.cnt >= 1000) /* fixfault() isn't fixing underlying cause? */
+ panic("fault: %d consecutive faults for va %#p", lflt.cnt, va);
+ if (lflt.cnt > maxflt.cnt) {
+ maxflt.cnt = lflt.cnt;
+ maxflt.va = va;
+ maxflt.pid = up->pid;
+ maxflt.pc = pc;
+ kstrdup(&maxflt.prog, up->text);
+ }
+
+ /* we're servicing that fault now! */
+ /* adjust the threshold and program name to suit */
+ if (lflt.cnt < 5 || strncmp(up->text, "8l", 2) != 0)
+ return 0;
+ iprint("%d consecutive faults for va %#p at pc %#p in %s "
+ "pid %ld\n", lflt.cnt, lflt.va, pc, up->text, lflt.pid);
+ iprint("\t%s: %s%s r31 %#lux tlbvirt %#lux\n",
+ excname[code], va == pc? "[instruction] ": "",
+ (read? "read": "write"), ur->r31, tlbvirt());
+ return 0;
+}
+
+char *
+faultsprint(char *p, char *ep)
+{
+ if (Debug)
+ p = seprint(p, ep,
+ "max consecutive faults %d for va %#p in %s\n",
+ maxflt.cnt, maxflt.va, maxflt.prog);
+ return p;
+}
+
+/*
+ * find out fault address and type of access.
+ * Call common fault handler.
+ */
+void
+faultmips(Ureg *ur, int user, int code)
+{
+ int read;
+ ulong addr;
+ char *p, buf[ERRMAX];
+
+ addr = ur->badvaddr;
+ addr &= ~(BY2PG-1);
+
+ read = !(code==CTLBM || code==CTLBS);
+
+/* print("fault: %s code %d va %#p pc %#p r31 %#lux tlbvirt %#lux\n",
+ up->text, code, ur->badvaddr, ur->pc, ur->r31, tlbvirt());/**/
+
+ if (Debug && ckfaultstuck(ur, read, code) || fault(addr, read) == 0)
+ return;
+
+ if(user) {
+ p = "store";
+ if(read)
+ p = "load";
+ snprint(buf, sizeof buf, "sys: trap: fault %s addr=%#lux r31=%#lux",
+ p, ur->badvaddr, ur->r31);
+ postnote(up, 1, buf, NDebug);
+ return;
+ }
+
+ splhi();
+ serialoq = nil;
+ print("kernel %s vaddr=%#lux\n", excname[code], ur->badvaddr);
+ print("st=%#lux pc=%#lux r31=%#lux sp=%#lux\n",
+ ur->status, ur->pc, ur->r31, ur->sp);
+ dumpregs(ur);
+ panic("fault");
+}
+
+/*
+ * called in syscallfmt.c, sysfile.c, sysproc.c
+ */
+void
+validalign(uintptr addr, unsigned align)
+{
+ /*
+ * Plan 9 is a 32-bit O/S, and the hardware it runs on
+ * does not usually have instructions which move 64-bit
+ * quantities directly, synthesizing the operations
+ * with 32-bit move instructions. Therefore, the compiler
+ * (and hardware) usually only enforce 32-bit alignment,
+ * if at all.
+ *
+ * Take this out if the architecture warrants it.
+ */
+ if(align == sizeof(vlong))
+ align = sizeof(long);
+
+ /*
+ * Check align is a power of 2, then addr alignment.
+ */
+ if((align != 0 && !(align & (align-1))) && !(addr & (align-1)))
+ return;
+ postnote(up, 1, "sys: odd address", NDebug);
+ error(Ebadarg);
+ /*NOTREACHED*/
+}
--- /dev/null
+++ b/sys/src/9/sgi/fns.h
@@ -1,0 +1,68 @@
+#include "../port/portfns.h"
+
+ulong arcs(ulong, ...);
+void arcsconsinit(void);
+void arcsproc(void*);
+void arcsputc(char);
+int argcgetc(void);
+ulong cankaddr(ulong);
+void clock(Ureg*);
+void clockinit(void);
+int cmpswap(long*, long, long);
+void coherence(void);
+void cycles(uvlong *);
+void dcflush(void*, ulong);
+void evenaddr(uintptr);
+void faultmips(Ureg*, int, int);
+ulong fcr31(void);
+void fptrap(Ureg*);
+char* getconf(char*);
+ulong getpagemask(void);
+ulong getrandom(void);
+int gettlbp(ulong, ulong*);
+ulong gettlbvirt(int);
+int hpc3irqlevel(int);
+void icflush(void *, ulong);
+void idlehands(void);
+void introff(int);
+void intron(int);
+void kfault(Ureg*);
+KMap* kmap(Page*);
+void kmapinit(void);
+void kmapinval(void);
+void kunmap(KMap*);
+void links(void);
+ulong prid(void);
+void procfork(Proc *);
+void procrestore(Proc *);
+void procsave(Proc *);
+void procsetup(Proc *);
+void purgetlb(int);
+void puttlbx(int, ulong, ulong, ulong, int);
+ulong rdcompare(void);
+ulong rdcount(void);
+ulong* reg(Ureg*, int);
+void restfpregs(FPsave*, ulong);
+void intrenable(int, void(*)(Ureg *, void *), void *);
+void setpagemask(ulong);
+void setwired(ulong);
+ulong stlbhash(ulong);
+void syscall(Ureg*);
+int tas(ulong*);
+void tlbinit(void);
+ulong tlbvirt(void);
+void touser(void*);
+#define userureg(ur) ((ur)->status & KUSER)
+void validalign(uintptr, unsigned);
+void wrcompare(ulong);
+void wrcount(ulong);
+
+#define PTR2UINT(p) ((uintptr)(p))
+#define UINT2PTR(i) ((void*)(i))
+
+#define waserror() (up->nerrlab++, setlabel(&up->errlab[up->nerrlab-1]))
+
+#define KADDR(a) ((void*)((ulong)(a)|KSEG0))
+#define PADDR(a) ((ulong)(a)&~KSEGM)
+
+#define KSEG1ADDR(a) ((void*)((ulong)(a)|KSEG1))
--- /dev/null
+++ b/sys/src/9/sgi/fptrap.c
@@ -1,0 +1,268 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "ureg.h"
+#include "io.h"
+#include "../port/error.h"
+
+enum /* op */
+{
+ ABS = 5,
+ ADD = 0,
+ CVTD = 33,
+ CVTS = 32,
+ CVTW = 36,
+ DIV = 3,
+ MOV = 6,
+ MUL = 2,
+ NEG = 7,
+ SUB = 1,
+};
+
+static int fpunimp(ulong);
+static ulong branch(Ureg*, ulong);
+
+void
+fptrap(Ureg *ur)
+{
+ ulong iw, npc;
+
+ if((up->fpsave.fpstatus&(1<<17)) == 0)
+ return;
+
+ if(ur->cause & (1<<31))
+ iw = *(ulong*)(ur->pc+4);
+ else
+ iw = *(ulong*)ur->pc;
+
+ if(fpunimp(iw) == 0)
+ return;
+
+ if(ur->cause & (1<<31)){
+ npc = branch(ur, up->fpsave.fpstatus);
+ if(npc == 0)
+ return;
+ ur->pc = npc;
+ }
+ else
+ ur->pc += 4;
+
+ up->fpsave.fpstatus &= ~(1<<17);
+}
+
+static void
+unpack(FPsave *f, int fmt, int reg, int *sign, int *exp)
+{
+ *sign = 1;
+ if(f->reg[reg] & 0x80000000)
+ *sign = -1;
+
+ switch(fmt){
+ case 0:
+ *exp = ((f->reg[reg]>>23)&0xFF) - ((1<<7)-2);
+ break;
+ case 1:
+ if(reg & 1) /* shouldn't happen */
+ reg &= ~1;
+ *exp = ((f->reg[reg]>>20)&0x7FF) - ((1<<10)-2);
+ break;
+ }
+}
+
+static void
+zeroreg(FPsave *f, int fmt, int reg, int sign)
+{
+ int size;
+
+ size = 0;
+ switch(fmt){
+ case 0:
+ size = 4;
+ break;
+ case 1:
+ if(reg & 1)
+ reg &= ~1;
+ size = 8;
+ break;
+ }
+ memset(&f->reg[reg], 0, size);
+ if(sign < 0)
+ f->reg[reg] |= 0x80000000;
+}
+
+static int
+fpunimp(ulong iw)
+{
+ int ss, st, sd;
+ int es, et, ed;
+ int maxe, maxm;
+ ulong op, fmt, ft, fs, fd;
+
+ if((iw>>25) != 0x23)
+ return 0;
+ op = iw & ((1<<6)-1);
+ fmt = (iw>>21) & ((1<<4)-1);
+ ft = (iw>>16) & ((1<<5)-1);
+ fs = (iw>>11) & ((1<<5)-1);
+ fd = (iw>>6) & ((1<<5)-1);
+ unpack(&up->fpsave, fmt, fs, &ss, &es);
+ unpack(&up->fpsave, fmt, ft, &st, &et);
+ ed = 0;
+ maxe = 0;
+ maxm = 0;
+ switch(fmt){
+ case 0:
+ maxe = 1<<7;
+ maxm = 24;
+ break;
+ case 1:
+ maxe = 1<<10;
+ maxm = 53;
+ break;
+ }
+ switch(op){
+ case ABS:
+ up->fpsave.reg[fd] &= ~0x80000000;
+ return 1;
+
+ case NEG:
+ up->fpsave.reg[fd] ^= 0x80000000;
+ return 1;
+
+ case SUB:
+ st = -st;
+ case ADD:
+ if(es<-(maxe-maxm) && et<-(maxe-maxm))
+ ed = -maxe;
+ if(es > et)
+ sd = es;
+ else
+ sd = et;
+ break;
+
+ case DIV:
+ et = -et;
+ case MUL:
+ sd = 1;
+ if(ss != st)
+ sd = -1;
+ ed = es + et;
+ break;
+
+ case CVTS:
+ if(fmt != 1)
+ return 0;
+ fmt = 0; /* convert FROM double TO single */
+ maxe = 1<<7;
+ ed = es;
+ sd = ss;
+ break;
+
+ default: /* probably a compare */
+ return 0;
+ }
+ if(ed <= -(maxe-5)){ /* guess: underflow */
+ zeroreg(&up->fpsave, fmt, fd, sd);
+ /* Set underflow exception and sticky */
+ up->fpsave.fpstatus |= (1<<3)|(1<<13);
+ return 1;
+ }
+ return 0;
+}
+
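+/*
+ * the trapping fp instruction may sit in a branch delay slot (BD set
+ * in CAUSE); emulate the branch at ur->pc to find the pc to resume at
+ * once the operation has been patched up.
+ */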
+static ulong
+branch(Ureg *ur, ulong fcr31)
+{
+ ulong iw, npc, rs, rt, rd, offset;
+
+ iw = *(ulong*)ur->pc;
+ rs = (iw>>21) & 0x1F;
+ if(rs)
+ rs = *reg(ur, rs);
+ rt = (iw>>16) & 0x1F;
+ if(rt)
+ rt = *reg(ur, rt);
+ offset = iw & ((1<<16)-1);
+ if(offset & (1<<15)) /* sign extend */
+ offset |= ~((1<<16)-1);
+ offset <<= 2;
+ /*
+ * Integer unit jumps first
+ */
+ switch(iw>>26){
+ case 0: /* SPECIAL: JR or JALR */
+ switch(iw&0x3F){
+ case 0x09: /* JALR */
+ rd = (iw>>11) & 0x1F;
+ if(rd)
+ *reg(ur, rd) = ur->pc+8;
+ /* fall through */
+ case 0x08: /* JR */
+ return rs;
+ default:
+ return 0;
+ }
+ case 1: /* BCOND */
+ switch((iw>>16) & 0x1F){
+ case 0x10: /* BLTZAL */
+ ur->r31 = ur->pc + 8;
+ /* fall through */
+ case 0x00: /* BLTZ */
+ if((long)rs < 0)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ case 0x11: /* BGEZAL */
+ ur->r31 = ur->pc + 8;
+ /* fall through */
+ case 0x01: /* BGEZ */
+ if((long)rs >= 0)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ default:
+ return 0;
+ }
+ case 3: /* JAL */
+ ur->r31 = ur->pc+8;
+ /* fall through */
+ case 2: /* JMP */
+ npc = iw & ((1<<26)-1);
+ npc <<= 2;
+ return npc | (ur->pc&0xF0000000);
+ case 4: /* BEQ */
+ if(rs == rt)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ case 5: /* BNE */
+ if(rs != rt)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ case 6: /* BLEZ */
+ if((long)rs <= 0)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ case 7: /* BGTZ */
+ if((long)rs > 0)
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ }
+ /*
+ * Floating point unit jumps
+ */
+ if((iw>>26) == 0x11) /* COP1 */
+ switch((iw>>16) & 0x3C1){
+ case 0x101: /* BCT */
+ case 0x181: /* BCT */
+ if(fcr31 & (1<<23))
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ case 0x100: /* BCF */
+ case 0x180: /* BCF */
+ if(!(fcr31 & (1<<23)))
+ return ur->pc+4 + offset;
+ return ur->pc + 8;
+ }
+ /* shouldn't get here */
+ return 0;
+}
--- /dev/null
+++ b/sys/src/9/sgi/indy
@@ -1,0 +1,48 @@
+dev
+ root
+ cons
+ uart
+ mnt
+ srv
+ shr
+ proc
+ env
+ pipe
+ dup
+ ether netif
+ ip arp chandial ip ipv6 ipaux iproute netlog nullmedium pktmedium inferno
+ ssl
+ tls
+ cap
+ kprof
+# sd
+# draw screen
+# mouse
+
+link
+ etherseeq
+ ethermedium
+ loopbackmedium
+
+misc
+ uartarcs
+
+ip
+ tcp
+ udp
+ rudp
+ ipifc
+ icmp
+ icmp6
+ gre
+ ipmux
+ esp
+
+port
+ int cpuserver = 0;
+
+bootdir
+ /$objtype/bin/paqfs
+ /$objtype/bin/auth/factotum
+ boot
+ bootfs.paq
--- /dev/null
+++ b/sys/src/9/sgi/init9.s
@@ -1,0 +1,8 @@
+TEXT _main(SB), $8
+ MOVW $setR30(SB), R30
+ MOVW $boot(SB), R1
+ ADDU $12, R29, R2 /* get a pointer to 0(FP) */
+ MOVW R1, 4(R29)
+ MOVW R2, 8(R29)
+ JAL startboot(SB)
+
--- /dev/null
+++ b/sys/src/9/sgi/io.h
@@ -1,0 +1,47 @@
+enum {
+ Mhz = 1000*1000,
+};
+
+#define IO(t,x) ((t*)(KSEG1|((ulong)x)))
+
+/* Interrupts */
+#define IRQGIO0 0
+#define IRQSCSI 1
+#define IRQSCSI1 2
+#define IRQENET 3
+#define IRQGDMA 4
+#define IRQPLP 5
+#define IRQGIO1 6
+#define IRQLCL2 7
+#define IRQISDN_ISAC 8
+#define IRQPOWER 9
+#define IRQISDN_HSCX 10
+#define IRQLCL3 11
+#define IRQHPCDMA 12
+#define IRQACFAIL 13
+#define IRQVIDEO 14
+#define IRQGIO2 15
+#define IRQEISA 19
+#define IRQKBDMS 20
+#define IRQDUART 21
+#define IRQDRAIN0 22
+#define IRQDRAIN1 23
+#define IRQGIOEXP0 22
+#define IRQGIOEXP1 23
+
+/*
+ * Local Interrupt registers (INT2)
+ */
+#define INT2_IP20 0x1fb801c0
+#define INT2_IP22 0x1fbd9000
+#define INT2_IP24 0x1fbd9880
+
+#define INT2_BASE INT2_IP24 /* indy */
+
+#define LIO_0_ISR (INT2_BASE+0x3)
+#define LIO_0_MASK (INT2_BASE+0x7)
+
+#define HPC3_ETHER 0x1fb80000
+
+#define MEMCFG0 0x1fa000c4 /* mem. size config. reg. 0 (w, rw) */
+#define MEMCFG1 0x1fa000cc /* mem. size config. reg. 1 (w, rw) */
--- /dev/null
+++ b/sys/src/9/sgi/l.s
@@ -1,0 +1,834 @@
+#include "mem.h"
+
+#define SP R29
+
+#define NOOP NOR R0, R0, R0
+#define WAIT NOOP; NOOP
+#define RETURN RET; NOOP
+#define CONST(i, v) MOVW $((i) & 0xffff0000), v; OR $((i) & 0xffff), v;
+#define GETMACH(r) CONST(MACHADDR, r)
+
+/*
+ * R4000 instructions
+ */
+#define ERET WORD $0x42000018
+#define LL(base, rt) WORD $((060<<26)|((base)<<21)|((rt)<<16))
+#define SC(base, rt) WORD $((070<<26)|((base)<<21)|((rt)<<16))
+
+#define MFC0(src,sel,dst) WORD $(0x40000000|((src)<<11)|((dst)<<16)|(sel))
+#define MTC0(src,dst,sel) WORD $(0x40800000|((dst)<<11)|((src)<<16)|(sel))
+#define RDHWR(hwr, r) WORD $(0x7c00003b|((hwr)<<11)|((r)<<16))
+
+/*
+ * cache manipulation
+ */
+#define CACHE BREAK /* overloaded op-code */
+
+#define PI R((0 /* primary I cache */
+#define PD R((1 /* primary D cache */
+#define SD R((3 /* secondary combined I/D cache */
+
+#define IWBI (0<<2))) /* index write-back invalidate */
+#define ILT (1<<2))) /* index load tag */
+#define IST (2<<2))) /* index store tag */
+#define CDE (3<<2))) /* create dirty exclusive */
+#define HINV (4<<2))) /* hit invalidate */
+#define HWBI (5<<2))) /* hit write back invalidate */
+#define HWB (6<<2))) /* hit write back */
+#define HSV (7<<2))) /* hit set virtual */
+
+ NOSCHED
+
+/*
+ * Boot only processor
+ */
+
+TEXT start(SB), $-4
+ MOVW $setR30(SB), R30
+
+ MOVW $CU1, R1
+ MOVW R1, M(STATUS)
+ WAIT
+
+ MOVW $(0x1C<<7), R1
+ MOVW R1, FCR31 /* permit only inexact and underflow */
+ NOOP
+ MOVD $0.5, F26
+ SUBD F26, F26, F24
+ ADDD F26, F26, F28
+ ADDD F28, F28, F30
+
+ MOVD F24, F0
+ MOVD F24, F2
+ MOVD F24, F4
+ MOVD F24, F6
+ MOVD F24, F8
+ MOVD F24, F10
+ MOVD F24, F12
+ MOVD F24, F14
+ MOVD F24, F16
+ MOVD F24, F18
+ MOVD F24, F20
+ MOVD F24, F22
+
+ MOVW $MACHADDR, R(MACH)
+ ADDU $(MACHSIZE-BY2V), R(MACH), SP
+
+ MOVW R(MACH), R1
+clrmach:
+ MOVW R0, (R1)
+ ADDU $BY2WD, R1
+ BNE R1, SP, clrmach
+ NOOP
+
+ MOVW $edata(SB), R1
+ MOVW $end(SB), R2
+clrbss:
+ MOVB R0, (R1)
+ ADDU $1, R1
+ BNE R1, R2, clrbss
+ NOOP
+
+ MOVW R0, 0(R(MACH)) /* m->machno = 0 */
+ MOVW R0, R(USER) /* up = nil */
+
+ JAL main(SB)
+ NOOP
+
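+/*
+ * arcs(offset, ...): call an ARC bios routine.  offset is the byte
+ * offset of the routine's entry in the firmware vector table, reached
+ * through the pointer at SPBADDR+0x20; up to four further arguments
+ * are passed in R4-R7.
+ */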
+TEXT arcs(SB), $256
+ MOVW R24, 0x80(SP)
+ MOVW R25, 0x84(SP)
+ MOVW R26, 0x88(SP)
+ MOVW R27, 0x8C(SP)
+
+ MOVW $SPBADDR, R4
+ MOVW 0x20(R4), R5
+ ADDU R1, R5
+ MOVW (R5), R2
+
+ MOVW 16(FP), R7
+ MOVW 12(FP), R6
+ MOVW 8(FP), R5
+ MOVW 4(FP), R4
+
+ JAL (R2)
+ NOOP
+
+ MOVW $setR30(SB), R30
+
+ MOVW 0x80(SP), R24
+ MOVW 0x84(SP), R25
+ MOVW 0x88(SP), R26
+ MOVW 0x8C(SP), R27
+
+ MOVW R2, R1
+ RETURN
+
+/*
+ * Take first processor into user mode
+ * - argument is stack pointer to user
+ */
+
+TEXT touser(SB), $-4
+ MOVW M(STATUS), R4
+ MOVW $(UTZERO+32), R2 /* header appears in text */
+ MOVW R2, M(EPC)
+ MOVW R1, SP
+ AND $(~KMODEMASK), R4
+ OR $(KUSER|IE|EXL), R4 /* switch to user mode, intrs on, exc */
+ MOVW R4, M(STATUS) /* " */
+ WAIT
+ ERET /* clears EXL */
+ NOOP
+
+/*
+ * manipulate interrupts
+ */
+
+/* enable an interrupt; bit is in R1 */
+TEXT intron(SB), $0
+ MOVW M(STATUS), R2
+ WAIT
+ OR R1, R2
+ MOVW R2, M(STATUS)
+ WAIT
+ RETURN
+
+/* disable an interrupt; bit is in R1 */
+TEXT introff(SB), $0
+ MOVW M(STATUS), R2
+ WAIT
+ XOR $-1, R1
+ AND R1, R2
+ MOVW R2, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT splhi(SB), $0
+ MOVW R31, 12(R(MACH)) /* save PC in m->splpc */
+ MOVW M(STATUS), R1
+ WAIT
+ AND $~IE, R1, R2
+ MOVW R2, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT splx(SB), $0
+ MOVW R31, 12(R(MACH)) /* save PC in m->splpc */
+ MOVW M(STATUS), R2
+ WAIT
+ AND $IE, R1
+ AND $~IE, R2
+ OR R2, R1
+ MOVW R1, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT spllo(SB), $0
+ MOVW M(STATUS), R1
+ WAIT
+ OR $IE, R1, R2
+ MOVW R2, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT spldone(SB), $0
+ RETURN
+
+TEXT islo(SB), $0
+ MOVW M(STATUS), R1
+ WAIT
+ AND $IE, R1
+ RETURN
+
+TEXT coherence(SB), $-4
+ RETURN
+
+/*
+ * process switching
+ */
+
+TEXT setlabel(SB), $-4
+ MOVW SP, 0(R1)
+ MOVW R31, 4(R1)
+ MOVW R0, R1
+ RETURN
+
+TEXT gotolabel(SB), $-4
+ MOVW 0(R1), SP
+ MOVW 4(R1), R31
+ MOVW $1, R1
+ RETURN
+
+/*
+ * the tlb routines need to be called at splhi.
+ */
+
+TEXT getwired(SB),$0
+ MOVW M(WIRED), R1
+ RETURN
+
+TEXT setwired(SB),$0
+ MOVW R1, M(WIRED)
+ RETURN
+
+TEXT getrandom(SB),$0
+ MOVW M(RANDOM), R1
+ RETURN
+
+TEXT getpagemask(SB),$0
+ MOVW M(PAGEMASK), R1
+ RETURN
+
+TEXT setpagemask(SB),$0
+ MOVW R1, M(PAGEMASK)
+ MOVW R0, R1 /* prevent accidents */
+ RETURN
+
+TEXT puttlbx(SB), $0 /* puttlbx(index, virt, phys0, phys1, pagemask) */
+ MOVW 4(FP), R2
+ MOVW 8(FP), R3
+ MOVW 12(FP), R4
+ MOVW $((2*BY2PG-1) & ~0x1fff), R5
+ MOVW R2, M(TLBVIRT)
+ MOVW R3, M(TLBPHYS0)
+ MOVW R4, M(TLBPHYS1)
+ MOVW R5, M(PAGEMASK)
+ MOVW R1, M(INDEX)
+ NOOP
+ NOOP
+ TLBWI
+ NOOP
+ RETURN
+
+TEXT tlbvirt(SB), $0
+ MOVW M(TLBVIRT), R1
+ NOOP
+ RETURN
+
+TEXT gettlbx(SB), $0 /* gettlbx(index, &entry) */
+ MOVW 4(FP), R4
+ MOVW R1, M(INDEX)
+ NOOP
+ NOOP
+ TLBR
+ NOOP
+ NOOP
+ NOOP
+ MOVW M(TLBVIRT), R1
+ MOVW M(TLBPHYS0), R2
+ MOVW M(TLBPHYS1), R3
+ NOOP
+ MOVW R1, 0(R4)
+ MOVW R2, 4(R4)
+ MOVW R3, 8(R4)
+ RETURN
+
+TEXT gettlbp(SB), $0 /* gettlbp(tlbvirt, &entry) */
+ MOVW 4(FP), R5
+ MOVW R1, M(TLBVIRT)
+ NOOP
+ NOOP
+ NOOP
+ TLBP
+ NOOP
+ NOOP
+ MOVW M(INDEX), R1
+ NOOP
+ BLTZ R1, gettlbp1
+ TLBR
+ NOOP
+ NOOP
+ NOOP
+ MOVW M(TLBVIRT), R2
+ MOVW M(TLBPHYS0), R3
+ MOVW M(TLBPHYS1), R4
+ NOOP
+ MOVW R2, 0(R5)
+ MOVW R3, 4(R5)
+ MOVW R4, 8(R5)
+gettlbp1:
+ RETURN
+
+TEXT gettlbvirt(SB), $0 /* gettlbvirt(index) */
+ MOVW R1, M(INDEX)
+ NOOP
+ NOOP
+ TLBR
+ NOOP
+ NOOP
+ NOOP
+ MOVW M(TLBVIRT), R1
+ NOOP
+ RETURN
+
+/*
+ * compute stlb hash index.
+ *
+ * M(TLBVIRT) [page & asid] in arg, result in arg.
+ * stir in swizzled asid; we get best results with asid in both high & low bits.
+ */
+#define STLBHASH(arg, tmp) \
+ AND $0xFF, arg, tmp; \
+ SRL $(PGSHIFT+1), arg; \
+ XOR tmp, arg; \
+ SLL $(STLBLOG-8), tmp; \
+ XOR tmp, arg; \
+ CONST (STLBSIZE-1, tmp); \
+ AND tmp, arg
+
+TEXT stlbhash(SB), $0 /* for mmu.c */
+ STLBHASH(R1, R2)
+ RETURN
+
+TEXT utlbmiss(SB), $-4
+ GETMACH (R26)
+ MOVW R27, 12(R26) /* m->splpc = R27 */
+
+ MOVW 16(R26), R27
+ ADDU $1, R27
+ MOVW R27,16(R26) /* m->tlbfault++ */
+
+ MOVW M(TLBVIRT), R27
+ NOOP
+ STLBHASH(R27, R26)
+
+ /* scale to a byte index (multiply by 12) */
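+	/* (each Softtlb entry is 3 words, i.e. 12 bytes) */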
+ SLL $1, R27, R26 /* × 2 */
+ ADDU R26, R27 /* × 3 */
+ SLL $2, R27 /* × 12 */
+
+ GETMACH (R26)
+ MOVW 4(R26), R26
+ ADDU R26, R27 /* R27 = &m->stb[hash] */
+
+ MOVW M(BADVADDR), R26
+ NOOP
+ AND $BY2PG, R26
+
+ BNE R26, utlbodd /* odd page? */
+ NOOP
+
+utlbeven:
+ MOVW 4(R27), R26 /* R26 = m->stb[hash].phys0 */
+ BEQ R26, stlbm /* nothing cached? do it the hard way */
+ NOOP
+ MOVW R26, M(TLBPHYS0)
+ MOVW 8(R27), R26 /* R26 = m->stb[hash].phys1 */
+ JMP utlbcom
+ MOVW R26, M(TLBPHYS1) /* branch delay slot */
+
+utlbodd:
+ MOVW 8(R27), R26 /* R26 = m->stb[hash].phys1 */
+ BEQ R26, stlbm /* nothing cached? do it the hard way */
+ NOOP
+ MOVW R26, M(TLBPHYS1)
+ MOVW 4(R27), R26 /* R26 = m->stb[hash].phys0 */
+ MOVW R26, M(TLBPHYS0)
+
+utlbcom:
+ WAIT
+ MOVW M(TLBVIRT), R26
+ MOVW 0(R27), R27 /* R27 = m->stb[hash].virt */
+ BEQ R27, stlbm /* nothing cached? do it the hard way */
+ NOOP
+ /* is the stlb entry for the right virtual address? */
+ BNE R26, R27, stlbm /* M(TLBVIRT) != m->stb[hash].virt? */
+ NOOP
+
+ /* if an entry exists, overwrite it, else write a random one */
+ CONST (PGSZ, R27)
+ MOVW R27, M(PAGEMASK) /* select page size */
+ TLBP /* probe tlb */
+ NOOP
+ NOOP
+ MOVW M(INDEX), R26
+ NOOP
+ BGEZ R26, utlbindex /* if tlb entry found, rewrite it */
+ NOOP
+ MOVW M(RANDOM), R26
+ MOVW R26, M(INDEX)
+utlbindex:
+ NOOP
+ NOOP
+ TLBWI /* write indexed tlb entry */
+ NOOP
+
+utlbret:
+ GETMACH (R26)
+ MOVW 12(R26), R27 /* R27 = m->splpc */
+ MOVW M(EPC), R26
+ JMP (R27)
+ NOOP
+
+stlbm:
+ GETMACH (R26)
+ MOVW 12(R26), R27 /* R27 = m->splpc */
+
+ /* fall through */
+
+TEXT gevector(SB), $-4
+ MOVW M(STATUS), R26
+ WAIT
+ AND $KUSER, R26
+
+ BNE R26, wasuser
+ MOVW SP, R26 /* delay slot, old SP in R26 */
+
+waskernel:
+ JMP dosave
+ SUBU $UREGSIZE, SP /* delay slot, allocate frame on kernel stack */
+
+wasuser: /* get kernel stack for this user process */
+ GETMACH (SP)
+ MOVW 8(SP), SP /* m->proc */
+ MOVW 8(SP), SP /* m->proc->kstack */
+ ADDU $(KSTACK-UREGSIZE), SP
+
+dosave:
+ MOVW R31, 0x28(SP)
+
+ JAL saveregs(SB)
+ MOVW R26, 0x10(SP) /* delay slot, save old SP */
+
+ GETMACH (R(MACH))
+ MOVW 8(R(MACH)), R(USER) /* R24 = m->proc */
+ MOVW $setR30(SB), R30
+
+ BEQ R26, dosys /* set by saveregs() */
+ NOOP
+
+dotrap:
+ MOVW $forkret(SB), R31
+ JMP trap(SB)
+ MOVW 4(SP), R1 /* delay slot, first arg to trap() */
+
+dosys:
+ JAL syscall(SB)
+ MOVW 4(SP), R1 /* delay slot, first arg to syscall() */
+
+ /* fall through */
+
+TEXT forkret(SB), $-4
+ JAL restregs(SB) /* restores old PC in R26 */
+ MOVW 0x14(SP), R1 /* delay slot, CAUSE */
+
+ MOVW 0x28(SP), R31
+
+ JMP (R27)
+ MOVW 0x10(SP), SP /* delay slot */
+
+/*
+ * SP-> 0x00 --- (spill R31)
+ * 0x04 --- (trap()/syscall() arg1)
+ * 0x08 status
+ * 0x0C pc
+ * 0x10 sp/usp
+ * 0x14 cause
+ * 0x18 badvaddr
+ * 0x1C tlbvirt
+ * 0x20 hi
+ * 0x24 lo
+ * 0x28 r31
+ * .....
+ * 0x9c r1
+ */
+
+TEXT saveregs(SB), $-4
+ MOVW R1, 0x9C(SP)
+ MOVW R2, 0x98(SP)
+ MOVW M(STATUS), R2
+ ADDU $8, SP, R1
+ MOVW R1, 0x04(SP) /* arg to base of regs */
+ MOVW $~KMODEMASK, R1
+ AND R2, R1
+ MOVW R1, M(STATUS) /* so we can take another trap */
+ MOVW R2, 0x08(SP)
+ MOVW M(EPC), R2
+ MOVW M(CAUSE), R1
+ MOVW R2, 0x0C(SP)
+ MOVW R1, 0x14(SP)
+ AND $(EXCMASK<<2), R1
+ SUBU $(CSYS<<2), R1, R26
+
+ BEQ R26, notsaved /* is syscall? */
+ MOVW R27, 0x34(SP) /* delay slot */
+
+ MOVW M(BADVADDR), R1
+ MOVW M(TLBVIRT), R2
+ MOVW R1, 0x18(SP)
+ MOVW R2, 0x1C(SP)
+
+ MOVW HI, R1
+ MOVW LO, R2
+ MOVW R1, 0x20(SP)
+ MOVW R2, 0x24(SP)
+
+ MOVW R25, 0x3C(SP)
+ MOVW R24, 0x40(SP)
+ MOVW R23, 0x44(SP)
+ MOVW R22, 0x48(SP)
+ MOVW R21, 0x4C(SP)
+ MOVW R20, 0x50(SP)
+ MOVW R19, 0x54(SP)
+ MOVW R18, 0x58(SP)
+ MOVW R17, 0x5C(SP)
+ MOVW R16, 0x60(SP)
+ MOVW R15, 0x64(SP)
+ MOVW R14, 0x68(SP)
+ MOVW R13, 0x6C(SP)
+ MOVW R12, 0x70(SP)
+ MOVW R11, 0x74(SP)
+ MOVW R10, 0x78(SP)
+ MOVW R9, 0x7C(SP)
+ MOVW R8, 0x80(SP)
+ MOVW R7, 0x84(SP)
+ MOVW R6, 0x88(SP)
+ MOVW R5, 0x8C(SP)
+ MOVW R4, 0x90(SP)
+ MOVW R3, 0x94(SP)
+
+notsaved:
+ MOVW R30, 0x2C(SP)
+
+ RET
+ MOVW R28, 0x30(SP) /* delay slot */
+
+TEXT restregs(SB), $-4
+ AND $(EXCMASK<<2), R1
+ SUBU $(CSYS<<2), R1, R26
+
+ BEQ R26, notrestored /* is syscall? */
+ MOVW 0x34(SP), R27 /* delay slot */
+
+ MOVW 0x3C(SP), R25
+ MOVW 0x40(SP), R24
+ MOVW 0x44(SP), R23
+ MOVW 0x48(SP), R22
+ MOVW 0x4C(SP), R21
+ MOVW 0x50(SP), R20
+ MOVW 0x54(SP), R19
+ MOVW 0x58(SP), R18
+ MOVW 0x5C(SP), R17
+ MOVW 0x60(SP), R16
+ MOVW 0x64(SP), R15
+ MOVW 0x68(SP), R14
+ MOVW 0x6C(SP), R13
+ MOVW 0x70(SP), R12
+ MOVW 0x74(SP), R11
+ MOVW 0x78(SP), R10
+ MOVW 0x7C(SP), R9
+ MOVW 0x80(SP), R8
+ MOVW 0x84(SP), R7
+ MOVW 0x88(SP), R6
+ MOVW 0x8C(SP), R5
+ MOVW 0x90(SP), R4
+ MOVW 0x94(SP), R3
+
+ MOVW 0x24(SP), R2
+ MOVW 0x20(SP), R1
+ MOVW R2, LO
+ MOVW R1, HI
+
+ MOVW 0x98(SP), R2
+
+notrestored:
+ MOVW 0x08(SP), R1
+ MOVW R1, M(STATUS)
+ MOVW 0x0C(SP), R26 /* old PC */
+ MOVW R26, M(EPC)
+
+ MOVW 0x30(SP), R28
+ MOVW 0x2C(SP), R30
+
+ RET
+ MOVW 0x9C(SP), R1 /* delay slot */
+
+/*
+ * hardware interrupt vectors
+ */
+
+TEXT vector0(SB), $-4
+ WAIT
+ CONST (SPBADDR+0x18, R26)
+ MOVW $eret(SB), R27
+ MOVW (R26), R26
+ JMP (R26)
+ NOOP
+
+TEXT vector180(SB), $-4
+ WAIT
+ CONST (SPBADDR+0x14, R26)
+ MOVW $eret(SB), R27
+ MOVW (R26), R26
+ JMP (R26)
+ NOOP
+
+TEXT eret(SB), $-4
+ ERET
+ NOOP
+
+/*
+ * floating-point stuff
+ */
+
+TEXT clrfpintr(SB), $0
+ MOVW M(STATUS), R3
+ WAIT
+ OR $CU1, R3
+ MOVW R3, M(STATUS)
+ NOOP
+ NOOP
+ NOOP
+
+ MOVW FCR31, R1
+ MOVW R1, R2
+ AND $~(0x3F<<12), R2
+ MOVW R2, FCR31
+
+ AND $~CU1, R3
+ MOVW R3, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT savefpregs(SB), $0
+ MOVW FCR31, R2
+ MOVW M(STATUS), R3
+ WAIT
+ AND $~(0x3F<<12), R2, R4
+ MOVW R4, FCR31
+
+ MOVD F0, 0x00(R1)
+ MOVD F2, 0x08(R1)
+ MOVD F4, 0x10(R1)
+ MOVD F6, 0x18(R1)
+ MOVD F8, 0x20(R1)
+ MOVD F10, 0x28(R1)
+ MOVD F12, 0x30(R1)
+ MOVD F14, 0x38(R1)
+ MOVD F16, 0x40(R1)
+ MOVD F18, 0x48(R1)
+ MOVD F20, 0x50(R1)
+ MOVD F22, 0x58(R1)
+ MOVD F24, 0x60(R1)
+ MOVD F26, 0x68(R1)
+ MOVD F28, 0x70(R1)
+ MOVD F30, 0x78(R1)
+
+ MOVW R2, 0x80(R1)
+ AND $~CU1, R3
+ MOVW R3, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT restfpregs(SB), $0
+ MOVW M(STATUS), R3
+ WAIT
+ OR $CU1, R3
+ MOVW R3, M(STATUS)
+ WAIT
+ MOVW fpstat+4(FP), R2
+ NOOP
+
+ MOVD 0x00(R1), F0
+ MOVD 0x08(R1), F2
+ MOVD 0x10(R1), F4
+ MOVD 0x18(R1), F6
+ MOVD 0x20(R1), F8
+ MOVD 0x28(R1), F10
+ MOVD 0x30(R1), F12
+ MOVD 0x38(R1), F14
+ MOVD 0x40(R1), F16
+ MOVD 0x48(R1), F18
+ MOVD 0x50(R1), F20
+ MOVD 0x58(R1), F22
+ MOVD 0x60(R1), F24
+ MOVD 0x68(R1), F26
+ MOVD 0x70(R1), F28
+ MOVD 0x78(R1), F30
+
+ MOVW R2, FCR31
+ AND $~CU1, R3
+ MOVW R3, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT fcr31(SB), $0 /* fp csr */
+ MOVW FCR31, R1
+ RETURN
+
+/*
+ * Emulate 68020 test and set: load linked / store conditional
+ */
+
+TEXT tas(SB), $0
+ MOVW R1, R2 /* address of key */
+tas1:
+ MOVW $1, R3
+ LL(2, 1)
+ NOOP
+ SC(2, 3)
+ NOOP
+ BEQ R3, tas1
+ NOOP
+ RETURN
+
+/* used by the semaphore implementation */
+TEXT cmpswap(SB), $0
+ MOVW R1, R2 /* address of key */
+ MOVW old+4(FP), R3 /* old value */
+ MOVW new+8(FP), R4 /* new value */
+ LL(2, 1) /* R1 = (R2) */
+ NOOP
+ BNE R1, R3, fail
+ NOOP
+ MOVW R4, R1
+ SC(2, 1) /* (R2) = R1 if (R2) hasn't changed; R1 = success */
+ NOOP
+ RETURN
+fail:
+ MOVW R0, R1
+ RETURN
+
+/*
+ * cache manipulation
+ */
+
+TEXT icflush(SB), $-4 /* icflush(virtaddr, count) */
+ MOVW M(STATUS), R10
+ WAIT
+ MOVW 4(FP), R9
+ MOVW $0, M(STATUS)
+ WAIT
+ ADDU R1, R9 /* R9 = last address */
+ MOVW $(~0x3f), R8
+ AND R1, R8 /* R8 = first address, rounded down */
+ ADDU $0x3f, R9
+ AND $(~0x3f), R9 /* round last address up */
+ SUBU R8, R9 /* R9 = revised count */
+icflush1: /* primary cache line size is 16 bytes */
+ CACHE PD+HWB, 0x00(R8)
+ CACHE PI+HINV, 0x00(R8)
+ CACHE PD+HWB, 0x10(R8)
+ CACHE PI+HINV, 0x10(R8)
+ CACHE PD+HWB, 0x20(R8)
+ CACHE PI+HINV, 0x20(R8)
+ CACHE PD+HWB, 0x30(R8)
+ CACHE PI+HINV, 0x30(R8)
+ SUBU $0x40, R9
+ ADDU $0x40, R8
+ BGTZ R9, icflush1
+ MOVW R10, M(STATUS)
+ WAIT
+ RETURN
+
+TEXT dcflush(SB), $-4 /* dcflush(virtaddr, count) */
+ MOVW M(STATUS), R10
+ WAIT
+ MOVW 4(FP), R9
+ MOVW $0, M(STATUS)
+ WAIT
+ ADDU R1, R9 /* R9 = last address */
+ MOVW $(~0x3f), R8
+ AND R1, R8 /* R8 = first address, rounded down */
+ ADDU $0x3f, R9
+ AND $(~0x3f), R9 /* round last address up */
+ SUBU R8, R9 /* R9 = revised count */
+dcflush1: /* primary cache line size is 16 bytes */
+ CACHE PD+HWB, 0x00(R8)
+ CACHE PD+HWB, 0x10(R8)
+ CACHE PD+HWB, 0x20(R8)
+ CACHE PD+HWB, 0x30(R8)
+ SUBU $0x40, R9
+ ADDU $0x40, R8
+ BGTZ R9, dcflush1
+ MOVW R10, M(STATUS)
+ WAIT
+ RETURN
+
+/*
+ * access to CP0 registers
+ */
+
+TEXT prid(SB), $0
+ MOVW M(PRID), R1
+ WAIT
+ RETURN
+
+TEXT rdcount(SB), $0
+ MOVW M(COUNT), R1
+ RETURN
+
+TEXT wrcount(SB), $0
+ MOVW R1, M(COUNT)
+ RETURN
+
+TEXT wrcompare(SB), $0
+ MOVW R1, M(COMPARE)
+ RETURN
+
+TEXT rdcompare(SB), $0
+ MOVW M(COMPARE), R1
+ RETURN
+
+ SCHED
--- /dev/null
+++ b/sys/src/9/sgi/main.c
@@ -1,0 +1,487 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "io.h"
+#include "init.h"
+#include "pool.h"
+#include "../ip/ip.h"
+#include <tos.h>
+#include <../port/error.h>
+
+enum {
+ /* space for syscall args, return PC, top-of-stack struct */
+ Stkheadroom = sizeof(Sargs) + sizeof(uintptr) + sizeof(Tos),
+};
+
+static uchar *sp; /* XXX - must go - user stack of init proc */
+static FPsave initfp;
+
+/*
+ * software tlb simulation
+ */
+static Softtlb stlb[MAXMACH][STLBSIZE];
+
+Conf conf;
+
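+/* fetch an ARCS firmware environment variable (arcs call 0x78) */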
+char*
+getconf(char *name)
+{
+ return (char*)arcs(0x78, name);
+}
+
+static void
+fmtinit(void)
+{
+ printinit();
+ quotefmtinstall();
+ /* ipreset installs these when chandevreset runs */
+ fmtinstall('i', eipfmt);
+ fmtinstall('I', eipfmt);
+ fmtinstall('E', eipfmt);
+ fmtinstall('V', eipfmt);
+ fmtinstall('M', eipfmt);
+}
+
+static int
+ckpagemask(ulong mask, ulong size)
+{
+ int s;
+ ulong pm;
+
+ s = splhi();
+ setpagemask(mask);
+ pm = getpagemask();
+ splx(s);
+ if(pm != mask){
+ iprint("page size %ldK not supported on this cpu; "
+ "mask %#lux read back as %#lux\n", size/1024, mask, pm);
+ return -1;
+ }
+ return 0;
+}
+
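+/*
+ * record the physical range [base, top) in conf.mem[], recursively
+ * carving out the kernel image and any firmware memory descriptors
+ * that are neither FreeMemory nor BadMemory.
+ */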
+void
+addmem(uintptr base, uintptr top)
+{
+ uintptr s, e;
+ ulong *m;
+ int i;
+
+ if(base >= top)
+ return;
+
+ /* exclude kernel */
+ s = 0;
+ e = PADDR(PGROUND((uintptr)end));
+ if(s < top && e > base){
+ if(s > base)
+ addmem(base, s);
+ if(e < top)
+ addmem(e, top);
+ return;
+ }
+
+ /* exclude reserved firmware memory regions */
+ m = nil;
+ while((m = (ulong*)arcs(0x48, m)) != nil){
+ s = m[1]<<12;
+ e = s + (m[2]<<12);
+ switch(m[0]){
+ case 2: /* FreeMemory */
+ case 3: /* BadMemory */
+ continue;
+ }
+ if(s < top && e > base){
+ if(s > base)
+ addmem(base, s);
+ if(e < top)
+ addmem(e, top);
+ return;
+ }
+ }
+ for(i=0; i<nelem(conf.mem); i++){
+ if(conf.mem[i].npage == 0){
+ conf.mem[i].base = base;
+ conf.mem[i].npage = (top - base)/BY2PG;
+
+ conf.npage += conf.mem[i].npage;
+ return;
+ }
+ }
+ print("conf.mem[] too small\n");
+}
+
+/*
+ * get memory configuration word for a bank
+ */
+ulong
+bank_conf(int bank)
+{
+ switch(bank){
+ case 0:
+ return *(ulong *)(KSEG1|MEMCFG0) >> 16;
+ case 1:
+ return *(ulong *)(KSEG1|MEMCFG0) & 0xffff;
+ case 2:
+ return *(ulong *)(KSEG1|MEMCFG1) >> 16;
+ case 3:
+ return *(ulong *)(KSEG1|MEMCFG1) & 0xffff;
+ }
+ return 0;
+}
+
+void
+meminit(void)
+{
+ uintptr base, size, top;
+ ulong mconf;
+ int i;
+
+ /*
+ * divide memory twixt user pages and kernel.
+ */
+ conf.npage = 0;
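+	/*
+	 * each 16-bit memory config field describes one bank:
+	 * bits 0-7 give the base in 4MB units, bits 8-12 the size
+	 * minus one in 4MB units, and bit 13 is set when the bank
+	 * is populated.
+	 */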
+ for(i=0; i<4; i++){
+ mconf = bank_conf(i);
+ if(!(mconf & 0x2000))
+ continue;
+ base = (mconf & 0xff) << 22;
+ size = ((mconf & 0x1f00) + 0x0100) << 14;
+ top = base + size;
+ addmem(base, top);
+ }
+}
+
+void
+main(void)
+{
+ savefpregs(&initfp);
+
+ arcsconsinit();
+
+ meminit();
+ confinit();
+ machinit(); /* calls clockinit */
+ active.exiting = 0;
+ active.machs = 1;
+ print("\nPlan 9\n");
+
+ kmapinit();
+ xinit();
+ timersinit();
+ fmtinit();
+
+ ckpagemask(PGSZ, BY2PG);
+ tlbinit();
+ pageinit();
+ procinit0();
+ initseg();
+ links();
+ chandevreset();
+
+ swapinit();
+ userinit();
+ schedinit();
+ panic("schedinit returned");
+}
+
+/*
+ * initialize a processor's mach structure. each processor does this
+ * for itself.
+ */
+void
+machinit(void)
+{
+ extern void gevector(void); /* l.s */
+ extern void utlbmiss(void);
+ extern void vector0(void);
+ extern void vector180(void);
+
+ void **sbp = (void*)SPBADDR;
+
+ m->stb = stlb[m->machno];
+
+ /* install exception handlers */
+ sbp[0x18/4] = utlbmiss;
+ sbp[0x14/4] = gevector;
+
+ /* we could install our own vectors directly, but we'll try to play nice */
+ if(0){
+ memmove((void*)(KSEG0+0x0), (void*)vector0, 0x80);
+ memmove((void*)(KSEG0+0x180), (void*)vector180, 0x80);
+ icflush((void*)(KSEG0+0x0), 0x80);
+ icflush((void*)(KSEG0+0x180), 0x80);
+ }
+
+	/* clear any pending fp exceptions; leaves CU1 off */
+ clrfpintr();
+ clockinit();
+}
+
+void
+init0(void)
+{
+ char buf[128];
+
+ up->nerrlab = 0;
+
+ spllo();
+
+ /*
+ * These are o.k. because rootinit is null.
+ * Then early kproc's will have a root and dot.
+ */
+ up->slash = namec("#/", Atodir, 0, 0);
+ pathclose(up->slash->path);
+ up->slash->path = newpath("/");
+ up->dot = cclone(up->slash);
+
+ chandevinit();
+
+ if(!waserror()){
+ ksetenv("cputype", "mips", 0);
+ snprint(buf, sizeof buf, "mips %s", conffile);
+ ksetenv("terminal", buf, 0);
+ if(cpuserver)
+ ksetenv("service", "cpu", 0);
+ else
+ ksetenv("service", "terminal", 0);
+
+ ksetenv("bootargs", "tcp", 0);
+
+ /* make kbdfs attach to /dev/eia0 arcs console */
+ ksetenv("console", "0", 0);
+
+ /* no usb */
+ ksetenv("usbwait", "0", 0);
+ ksetenv("nousbrc", "1", 0);
+
+ poperror();
+ }
+
+ /* process input for arcs console */
+ kproc("arcs", arcsproc, 0);
+
+ kproc("alarm", alarmkproc, 0);
+ touser(sp);
+}
+
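+/* copy a nul-terminated argument string down onto the init stack */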
+static uchar *
+pusharg(char *p)
+{
+ int n;
+
+ n = strlen(p) + 1;
+ sp -= n;
+ memmove(sp, p, n);
+ return sp;
+}
+
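+/*
+ * lay out the initial argument stack for *init* in the kmap'd stack
+ * page: argv[0] ("boot") and the argv vector are pushed, and sp and
+ * the vector entries are relocated to where the page will appear
+ * just below USTKTOP.
+ */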
+static void
+bootargs(uintptr base)
+{
+	int i, ac;
+ uchar *av[32];
+ uchar **lsp;
+
+ sp = (uchar *) base + BY2PG - sizeof(Tos);
+
+ ac = 0;
+ av[ac++] = pusharg("boot");
+ sp = (uchar *) ((ulong) sp & ~7);
+ sp -= ROUND((ac + 1) * sizeof(sp), 8) + 4;
+ lsp = (uchar **) sp;
+ for(i = 0; i < ac; i++)
+ lsp[i] = av[i] + ((USTKTOP - BY2PG) - (ulong) base);
+ lsp[i] = 0;
+ sp += (USTKTOP - BY2PG) - (ulong) base;
+}
+
+void
+userinit(void)
+{
+ Proc *p;
+ KMap *k;
+ Page *pg;
+ Segment *s;
+
+ p = newproc();
+ p->pgrp = newpgrp();
+ p->egrp = smalloc(sizeof(Egrp));
+ p->egrp->ref = 1;
+ p->fgrp = dupfgrp(nil);
+ p->rgrp = newrgrp();
+ p->procmode = 0640;
+
+ kstrdup(&eve, "");
+ kstrdup(&p->text, "*init*");
+ kstrdup(&p->user, eve);
+
+ procsetup(p);
+
+ /*
+ * Kernel Stack
+ */
+ p->sched.pc = (ulong)init0;
+ p->sched.sp = (ulong)p->kstack+KSTACK-Stkheadroom;
+ p->sched.sp = STACKALIGN(p->sched.sp);
+
+ /*
+ * User Stack
+ *
+ * Technically, newpage can't be called here because it
+ * should only be called when in a user context as it may
+ * try to sleep if there are no pages available, but that
+ * shouldn't be the case here.
+ */
+ s = newseg(SG_STACK, USTKTOP-USTKSIZE, USTKSIZE/BY2PG);
+ p->seg[SSEG] = s;
+ pg = newpage(1, 0, USTKTOP-BY2PG);
+ segpage(s, pg);
+ k = kmap(pg);
+ bootargs(VA(k));
+ kunmap(k);
+
+ /*
+ * Text
+ */
+ s = newseg(SG_TEXT, UTZERO, 1);
+ s->flushme++;
+ p->seg[TSEG] = s;
+ pg = newpage(1, 0, UTZERO);
+ pg->txtflush = ~0;
+ segpage(s, pg);
+ k = kmap(s->map[0]->pages[0]);
+ memset((void *)VA(k), 0, BY2PG);
+ memmove((ulong*)VA(k), initcode, sizeof initcode);
+ kunmap(k);
+
+ ready(p);
+}
+
+void
+exit(int ispanic)
+{
+ splhi();
+ while(ispanic);
+ arcs(0x18); /* reboot */
+}
+
+void
+reboot(void *, void *, ulong)
+{
+}
+
+void
+evenaddr(uintptr va)
+{
+ if((va & 3) != 0){
+ dumpstack();
+ postnote(up, 1, "sys: odd address", NDebug);
+ error(Ebadarg);
+ }
+}
+
+void
+procsetup(Proc *p)
+{
+ p->fpstate = FPinit;
+ p->fpsave = initfp;
+
+ cycles(&p->kentry);
+ p->pcycles = -p->kentry;
+}
+
+void
+procfork(Proc *p)
+{
+ int s;
+
+ p->kentry = up->kentry;
+ p->pcycles = -p->kentry;
+
+ s = splhi();
+ switch(up->fpstate & ~FPillegal){
+ case FPactive:
+ savefpregs(&up->fpsave);
+ up->fpstate = FPinactive;
+ /* wet floor */
+ case FPinactive:
+ p->fpsave = up->fpsave;
+ p->fpstate = FPinactive;
+ }
+ splx(s);
+}
+
+void
+procsave(Proc *p)
+{
+ uvlong t;
+
+ if(p->fpstate == FPactive){
+ if(p->state != Moribund) {
+ savefpregs(&p->fpsave);
+ p->fpstate = FPinactive;
+ }
+ }
+
+ cycles(&t);
+ p->pcycles += t;
+}
+
+void
+procrestore(Proc *p)
+{
+ uvlong t;
+
+ if(p->kp)
+ return;
+ cycles(&t);
+ p->pcycles -= t;
+}
+
+void
+idlehands(void)
+{
+}
+
+void
+confinit(void)
+{
+ ulong kpages;
+
+ /*
+ * set up CPU's mach structure
+ * cpu0's was zeroed in l.s and our stack is in Mach, so don't zero it.
+ */
+ m->machno = 0;
+ m->speed = 150; /* initial guess at MHz */
+ m->hz = m->speed * Mhz;
+ conf.nmach = 1;
+
+ /* set up other configuration parameters */
+ conf.nproc = 2000;
+ conf.nswap = 262144;
+ conf.nswppo = 4096;
+ conf.nimage = 200;
+
+ conf.copymode = 0; /* copy on write */
+
+ kpages = conf.npage - (conf.npage*80)/100;
+ if(kpages > (64*MB + conf.npage*sizeof(Page))/BY2PG){
+ kpages = (64*MB + conf.npage*sizeof(Page))/BY2PG;
+ kpages += (conf.nproc*KSTACK)/BY2PG;
+ }
+ conf.upages = conf.npage - kpages;
+ conf.ialloc = (kpages/2)*BY2PG;
+
+ kpages *= BY2PG;
+ kpages -= conf.upages*sizeof(Page)
+ + conf.nproc*sizeof(Proc)
+ + conf.nimage*sizeof(Image)
+ + conf.nswap
+ + conf.nswppo*sizeof(Page*);
+ mainmem->maxsize = kpages;
+// mainmem->flags |= POOL_PARANOIA;
+}
--- /dev/null
+++ b/sys/src/9/sgi/mem.h
@@ -1,0 +1,277 @@
+/*
+ * Memory and machine-specific definitions. Used in C and assembler.
+ */
+
+#define MIN(a, b) ((a) < (b)? (a): (b))
+#define MAX(a, b) ((a) > (b)? (a): (b))
+
+/*
+ * Sizes
+ */
+
+#define BI2BY 8 /* bits per byte */
+#define BI2WD 32 /* bits per word */
+#define BY2WD 4 /* bytes per word */
+#define BY2V 8 /* bytes per vlong */
+
+#define ROUND(s, sz) (((s)+((sz)-1))&~((sz)-1))
+#define PGROUND(s) ROUND(s, BY2PG)
+
+#define MAXBY2PG (16*1024) /* rounding for UTZERO in executables; see mkfile */
+#define UTROUND(t) ROUNDUP((t), MAXBY2PG)
+
+#ifndef BIGPAGES
+#define BY2PG 4096 /* bytes per page */
+#define PGSHIFT 12 /* log2(BY2PG) */
+#define PGSZ PGSZ4K
+#else
+/* 16K pages work very poorly */
+#define BY2PG (16*1024) /* bytes per page */
+#define PGSHIFT 14 /* log2(BY2PG) */
+#define PGSZ PGSZ16K
+#endif
+
+#define KSTACK (8*1024) /* Size of kernel stack */
+#define MACHSIZE (BY2PG+KSTACK)
+#define WD2PG (BY2PG/BY2WD) /* words per page */
+
+#define MAXMACH 1 /* max # cpus system can run; see active.machs */
+#define STACKALIGN(sp) ((sp) & ~7) /* bug: assure with alloc */
+#define BLOCKALIGN 16
+#define CACHELINESZ 32 /* mips24k */
+#define ICACHESIZE (64*1024) /* rb450g */
+#define DCACHESIZE (32*1024) /* rb450g */
+
+#define MASK(w) FMASK(0, w)
+
+/*
+ * Time
+ */
+#define HZ 100 /* clock frequency */
+#define MS2HZ (1000/HZ) /* millisec per clock tick */
+#define TK2SEC(t) ((t)/HZ) /* ticks to seconds */
+
+/*
+ * CP0 registers
+ */
+
+#define INDEX 0
+#define RANDOM 1
+#define TLBPHYS0 2 /* aka ENTRYLO0 */
+#define TLBPHYS1 3 /* aka ENTRYLO1 */
+#define CONTEXT 4
+#define PAGEMASK 5
+#define WIRED 6
+#define BADVADDR 8
+#define COUNT 9
+#define TLBVIRT 10 /* aka ENTRYHI */
+#define COMPARE 11
+#define STATUS 12
+#define CAUSE 13
+#define EPC 14
+#define PRID 15
+#define CONFIG 16
+#define LLADDR 17
+#define WATCHLO 18
+#define WATCHHI 19
+#define DEBUGREG 23
+#define DEPC 24
+#define PERFCOUNT 25
+#define CACHEECC 26
+#define CACHEERR 27
+#define TAGLO 28
+#define TAGHI 29
+#define ERROREPC 30
+#define DESAVE 31
+
+/*
+ * M(STATUS) bits
+ */
+#define KMODEMASK 0x0000001f
+#define IE 0x00000001 /* master interrupt enable */
+#define EXL 0x00000002 /* exception level */
+#define ERL 0x00000004 /* error level */
+#define KSUPER 0x00000008
+#define KUSER 0x00000010
+#define KSU 0x00000018
+#define UX 0x00000020
+#define SX 0x00000040
+#define KX 0x00000080
+#define INTMASK 0x0000ff00
+#define SW0 0x00000100
+#define SW1 0x00000200
+#define INTR0 0x00000100 /* interrupt enable bits */
+#define INTR1 0x00000200
+#define INTR2 0x00000400
+#define INTR3 0x00000800
+#define INTR4 0x00001000
+#define INTR5 0x00002000
+#define INTR6 0x00004000
+#define INTR7 0x00008000
+#define DE 0x00010000
+#define TS 0x00200000 /* tlb shutdown; on 24k at least */
+#define BEV 0x00400000 /* bootstrap exception vectors */
+#define RE 0x02000000 /* reverse-endian in user mode */
+#define FR 0x04000000 /* enable 32 FP regs */
+#define CU0 0x10000000
+#define CU1 0x20000000 /* FPU enable */
+
+/*
+ * M(CONFIG) bits
+ */
+
+#define CFG_K0 7 /* kseg0 cachability */
+#define CFG_MM (1<<18) /* write-through merging enabled */
+
+/*
+ * M(CAUSE) bits
+ */
+
+#define BD (1<<31) /* last excep'n occurred in branch delay slot */
+
+/*
+ * Exception codes
+ */
+#define EXCMASK 0x1f /* mask of all causes */
+#define CINT 0 /* external interrupt */
+#define CTLBM 1 /* TLB modification: store to unwritable page */
+#define CTLBL 2 /* TLB miss (load or fetch) */
+#define CTLBS 3 /* TLB miss (store) */
+#define CADREL 4 /* address error (load or fetch) */
+#define CADRES 5 /* address error (store) */
+#define CBUSI 6 /* bus error (fetch) */
+#define CBUSD 7 /* bus error (data load or store) */
+#define CSYS 8 /* system call */
+#define CBRK 9 /* breakpoint */
+#define CRES 10 /* reserved instruction */
+#define CCPU 11 /* coprocessor unusable */
+#define COVF 12 /* arithmetic overflow */
+#define CTRAP 13 /* trap */
+#define CVCEI 14 /* virtual coherence exception (instruction) */
+#define CFPE 15 /* floating point exception */
+#define CTLBRI 19 /* tlb read-inhibit */
+#define CTLBXI 20 /* tlb execute-inhibit */
+#define CWATCH 23 /* watch exception */
+#define CMCHK		24	/* machine check */
+#define CCACHERR 30 /* cache error */
+#define CVCED 31 /* virtual coherence exception (data) */
+
+/*
+ * M(CACHEECC) a.k.a. ErrCtl bits
+ */
+#define PE (1<<31)
+#define LBE (1<<25)
+#define WABE (1<<24)
+
+/*
+ * Trap vectors
+ */
+
+#define UTLBMISS (KSEG0+0x000)
+#define XEXCEPTION (KSEG0+0x080)
+#define CACHETRAP (KSEG0+0x100)
+#define EXCEPTION (KSEG0+0x180)
+
+/*
+ * Magic registers
+ */
+
+#define USER 24 /* R24 is up-> */
+#define MACH 25 /* R25 is m-> */
+
+#define UREGSIZE 0xA0 /* sizeof(Ureg)+8 */
+
+/*
+ * MMU
+ */
+#define PGSZ4K (0x00<<13)
+#define PGSZ16K (0x03<<13) /* on 24k */
+#define PGSZ64K (0x0F<<13)
+#define PGSZ256K (0x3F<<13)
+#define PGSZ1M (0xFF<<13)
+#define PGSZ4M (0x3FF<<13)
+#define PGSZ8M (0x7FF<<13) /* not on 24k */
+#define PGSZ16M (0xFFF<<13)
+#define PGSZ64M (0x3FFF<<13) /* on 24k */
+#define PGSZ256M (0xFFFF<<13) /* on 24k */
+
+/* mips address spaces, tlb-mapped unless marked otherwise */
+#define KUSEG 0x00000000 /* user process */
+#define KSEG0 0x80000000 /* kernel (direct mapped, cached) */
+#define KSEG1 0xA0000000 /* kernel (direct mapped, uncached: i/o) */
+#define KSEG2 0xC0000000 /* kernel, used for TSTKTOP */
+#define KSEG3 0xE0000000 /* kernel, used by kmap */
+#define KSEGM 0xE0000000 /* mask to check which seg */
+
+/*
+ * Fundamental addresses
+ */
+
+#define REBOOTADDR KADDR(0x1000) /* just above vectors */
+#define MACHADDR 0x88014000 /* Mach structures */
+#define MACHP(n) ((Mach *)(MACHADDR+(n)*MACHSIZE))
+#define KMAPADDR 0xE0000000 /* kmap'd addresses */
+#define SPBADDR 0x80001000
+
+#define PIDXSHFT 12
+#ifndef BIGPAGES
+#define NCOLOR 8
+#define PIDX ((NCOLOR-1)<<PIDXSHFT)
+#define getpgcolor(a) (((ulong)(a)>>PIDXSHFT) % NCOLOR)
+#else
+/* no cache aliases are possible with pages of 16K or larger */
+#define NCOLOR 1
+#define PIDX 0
+#define getpgcolor(a) 0
+#endif
+#define KMAPSHIFT 15
+
+#define PTEGLOBL (1<<0)
+#define PTEVALID (1<<1)
+#define PTEWRITE (1<<2)
+#define PTERONLY 0
+#define PTEALGMASK (7<<3)
+#define PTENONCOHERWT (0<<3) /* cached, write-through (slower) */
+#define PTEUNCACHED (2<<3)
+#define PTENONCOHERWB (3<<3) /* cached, write-back */
+#define PTEUNCACHEDACC (7<<3)
+/* rest are reserved on 24k */
+#define PTECOHERXCL (4<<3)
+#define PTECOHERXCLW (5<<3)
+#define PTECOHERUPDW (6<<3)
+
+/* how much faster is it? mflops goes from about .206 (WT) to .37 (WB) */
+// #define PTECACHABILITY PTENONCOHERWT /* 24k erratum 48 disallows WB */
+#define PTECACHABILITY PTENONCOHERWB
+
+#define PTEPID(n) (n)
+#define PTEMAPMEM (1024*1024)
+#define PTEPERTAB (PTEMAPMEM/BY2PG)
+#define SEGMAPSIZE 512
+#define SSEGMAPSIZE 16
+
+#define STLBLOG 15
+#define STLBSIZE (1<<STLBLOG) /* entries in the soft TLB */
+/* page # bits that don't fit in STLBLOG bits */
+#define HIPFNBITS (BI2WD - (PGSHIFT+1) - STLBLOG)
+#define KPTELOG 8
+#define KPTESIZE (1<<KPTELOG) /* entries in the kfault soft TLB */
+
+#define TLBPID(n) ((n)&0xFF)
+#define NTLBPID 256 /* # of pids (affects size of Mach) */
+#define NTLB 48 /* # of entries (r4k) */
+#define TLBOFF 1 /* first tlb entry (0 used within mmuswitch) */
+#define NKTLB 2 /* # of initial kfault tlb entries */
+#define TLBROFF (TLBOFF+NKTLB) /* first large IO window tlb entry */
+
+/*
+ * Address spaces
+ */
+#define UZERO KUSEG /* base of user address space */
+#define UTZERO (UZERO+MAXBY2PG) /* 1st user text address; see mkfile */
+#define USTKTOP (KZERO-BY2PG) /* byte just beyond user stack */
+#define USTKSIZE (8*1024*1024) /* size of user stack */
+#define TSTKTOP (KSEG2+USTKSIZE-BY2PG) /* top of temporary stack */
+#define TSTKSIZ	(1024*1024/BY2PG)	/* can be at most USTKSIZE/BY2PG */
+#define KZERO KSEG0 /* base of kernel address space */
+#define KTZERO (KZERO+0x08020000) /* first address in kernel text */
--- /dev/null
+++ b/sys/src/9/sgi/mkfile
@@ -1,0 +1,93 @@
+CONF=indy
+CONFLIST=indy
+
+objtype=mips
+</$objtype/mkfile
+p=9
+# must match mem.h
+KTZERO=0x88020000
+# must match mem.h
+UTZERO=0x4020
+
+# CFLAGS=$CFLAGS -DFPEMUDEBUG
+
+DEVS=`{rc ../port/mkdevlist $CONF}
+
+PORT=\
+ alarm.$O\
+ alloc.$O\
+ allocb.$O\
+ auth.$O\
+ cache.$O\
+ chan.$O\
+ clock.$O\
+ dev.$O\
+ edf.$O\
+ fault.$O\
+ fptrap.$O\
+ mul64fract.$O\
+ page.$O\
+ parse.$O\
+ pgrp.$O\
+ portclock.$O\
+ print.$O\
+ proc.$O\
+ qio.$O\
+ qlock.$O\
+ rdb.$O\
+ rebootcmd.$O\
+ segment.$O\
+ swap.$O\
+ syscallfmt.$O\
+ sysfile.$O\
+ sysproc.$O\
+ taslock.$O\
+ tod.$O\
+ xalloc.$O\
+
+OBJ=\
+ l.$O\
+ faultmips.$O\
+ main.$O\
+ mmu.$O\
+ random.$O\
+ trap.$O\
+ $CONF.root.$O\
+ $CONF.rootc.$O\
+ $DEVS\
+ $PORT\
+
+LIB=\
+ /mips/lib/libauth.a\
+ /mips/lib/libsec.a\
+ /mips/lib/libmp.a\
+ /mips/lib/libip.a\
+ /mips/lib/libc.a\
+
+$p$CONF: $OBJ $CONF.c $LIB
+ $CC $CFLAGS '-DKERNDATE='`{date -n} $CONF.c
+ $LD -o $target -l -R8 -H3 -T$KTZERO $OBJ $CONF.$O $LIB
+ size $target
+
+install:V: $p$CONF
+ cp $p$CONF /$objtype/
+
+<../boot/bootmkfile
+<../port/portmkfile
+<|../port/mkbootrules $CONF
+
+init.h: init9.s ../port/initcode.c /sys/src/libc/9syscall/sys.h
+ va init9.s
+ vc ../port/initcode.c
+ vl -T$UTZERO -R4 -o init.out init9.$O initcode.$O
+ {echo 'uchar initcode[]={'
+ xd -r -1x init.out |
+ sed -e 's/^[0-9a-f]+ //' -e 's/ ([0-9a-f][0-9a-f])/0x\1,/g'
+ echo '};'} > init.h
+
+faultmips.$O mmu.$O syscall.$O trap.$O: /$objtype/include/ureg.h
+main.$O: /$objtype/include/ureg.h errstr.h init.h
+
+%.clean:V:
+ rm -f $stem.c [9bz]$stem [9bz]$stem.gz boot$stem.* init.h
--- /dev/null
+++ b/sys/src/9/sgi/mmu.c
@@ -1,0 +1,479 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "ureg.h"
+
+/*
+ * tlb entry 0 is used only by mmuswitch() to set the current tlb pid.
+ *
+ * It is apparently assumed that user tlb entries are not
+ * overwritten during start-up, so ...
+ * During system start-up (before up first becomes non-nil),
+ * Kmap entries start at tlb index 1 and work their way up until
+ * kmapinval() removes them. They then restart at 1. As long as there
+ * are few kmap entries they will not pass tlbroff (the WIRED tlb entry
+ * limit) and interfere with user tlb entries.
+ * Once start-up is over, we combine the kernel and user tlb pools into one,
+ * in the hope of making better use of the tlb on systems with small ones.
+ *
+ * All invalidations of the tlb are via indexed entries. The virtual
+ * address used is always 'KZERO | (x<<(PGSHIFT+1)) | currentpid' where
+ * 'x' is the index into the tlb. This ensures that the current pid doesn't
+ * change and that no two invalidated entries have matching virtual
+ * addresses just in case SGI/MIPS ever makes a chip that cares (as
+ * they keep threatening). These entries should never be used in
+ * lookups since accesses to KZERO addresses don't go through the tlb
+ * (actually only true of KSEG0 and KSEG1; KSEG2 and KSEG3 do go
+ * through the tlb).
+ */
+
+#define TLBINVAL(x, pid) puttlbx(x, KZERO|((x)<<(PGSHIFT+1))|(pid), 0, 0, PGSZ)
+
+enum {
+ Debugswitch = 0,
+ Debughash = 0,
+};
+
+static ulong ktime[8]; /* only for first 8 cpus */
+
+void
+tlbinit(void)
+{
+ int i;
+
+ for(i=0; i<NTLB; i++)
+ TLBINVAL(i, 0);
+}
+
+Lock kmaplock;
+KMap kpte[KPTESIZE];
+KMap* kmapfree;
+
+static int minfree = KPTESIZE;
+static int lastfree;
+static int tlbroff = TLBROFF;
+
+static void
+nfree(void)
+{
+ int i;
+ KMap *k;
+
+ i = 0;
+ for(k=kmapfree; k; k=k->next)
+ i++;
+ if(i<minfree){
+ iprint("%d free\n", i);
+ minfree = i;
+ }
+ lastfree = i;
+}
+
+void
+kmapinit(void)
+{
+ KMap *k, *klast;
+
+ lock(&kmaplock);
+ kmapfree = kpte;
+ klast = &kpte[KPTESIZE-1];
+ for(k=kpte; k<klast; k++)
+ k->next = k+1;
+ k->next = 0;
+ unlock(&kmaplock);
+
+ m->ktlbnext = TLBOFF;
+}
+
+void
+kmapdump(void)
+{
+ int i;
+
+ for(i=0; i<KPTESIZE; i++)
+ iprint("%d: %lud pc=%#lux\n", i, kpte[i].ref, kpte[i].pc);
+}
+
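+/*
+ * enter a kmap pte into the hardware tlb.  during startup kernel
+ * entries cycle through the slots below tlbroff; once the first
+ * process exists the wired region is collapsed to a single entry
+ * and an existing (or random) slot is reused.
+ */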
+static int
+putktlb(KMap *k)
+{
+ int x;
+ ulong virt;
+ ulong tlbent[3];
+
+ virt = k->virt & ~BY2PG | TLBPID(tlbvirt());
+ x = gettlbp(virt, tlbent);
+ if (!m->paststartup)
+ if (up) { /* startup just ended? */
+ tlbroff = 1;
+ setwired(tlbroff); /* share all-but-one entries */
+ m->paststartup = 1;
+ } else if (x < 0) { /* no such entry? use next */
+ x = m->ktlbnext++;
+ if(m->ktlbnext >= tlbroff)
+ m->ktlbnext = TLBOFF;
+ }
+	if (x < 0)			/* no entry for va? overwrite random one */
+		x = getrandom();
+ puttlbx(x, virt, k->phys0, k->phys1, PGSZ);
+ m->ktlbx[x] = 1;
+ return x;
+}
+
+/*
+ * Arrange that the KMap'd virtual address will hit the same
+ * primary cache line as pg->va by making bits 14...12 of the
+ * tag the same as virtual address. These bits are the index
+ * into the primary cache and are checked whenever accessing
+ * the secondary cache through the primary. Violation causes
+ * a VCE trap.
+ */
+KMap *
+kmap(Page *pg)
+{
+ int s, printed = 0;
+ ulong pte, virt;
+ KMap *k;
+
+ s = splhi();
+ lock(&kmaplock);
+
+ if(kmapfree == 0) {
+retry:
+ unlock(&kmaplock);
+ kmapinval(); /* try and free some */
+ lock(&kmaplock);
+ if(kmapfree == 0){
+ unlock(&kmaplock);
+ splx(s);
+ if(printed++ == 0){
+ /* using iprint here we get mixed up with other prints */
+ print("%d KMAP RETRY %#lux ktime %ld %ld %ld %ld %ld %ld %ld %ld\n",
+ m->machno, getcallerpc(&pg),
+ ktime[0], ktime[1], ktime[2], ktime[3],
+ ktime[4], ktime[5], ktime[6], ktime[7]);
+ delay(200);
+ }
+ splhi();
+ lock(&kmaplock);
+ goto retry;
+ }
+ }
+
+ k = kmapfree;
+ kmapfree = k->next;
+
+ k->pg = pg;
+ /*
+ * One for the allocation,
+ * One for kactive
+ */
+ k->pc = getcallerpc(&pg);
+ k->ref = 2;
+ k->konmach[m->machno] = m->kactive;
+ m->kactive = k;
+
+ virt = pg->va;
+ /* bits 14..12 form the secondary-cache virtual index */
+ virt &= PIDX;
+ virt |= KMAPADDR | ((k-kpte)<<KMAPSHIFT);
+
+ k->virt = virt;
+ pte = PPN(pg->pa)|PTECACHABILITY|PTEGLOBL|PTEWRITE|PTEVALID;
+ if(virt & BY2PG) {
+ k->phys0 = PTEGLOBL | PTECACHABILITY;
+ k->phys1 = pte;
+ }
+ else {
+ k->phys0 = pte;
+ k->phys1 = PTEGLOBL | PTECACHABILITY;
+ }
+
+ putktlb(k);
+ unlock(&kmaplock);
+
+ splx(s);
+ return k;
+}
+
+void
+kunmap(KMap *k)
+{
+ int s;
+
+ s = splhi();
+ if(decref(k) == 0) {
+ k->virt = 0;
+ k->phys0 = 0;
+ k->phys1 = 0;
+ k->pg = 0;
+
+ lock(&kmaplock);
+ k->next = kmapfree;
+ kmapfree = k;
+//nfree();
+ unlock(&kmaplock);
+ }
+ splx(s);
+}
+
+void
+kfault(Ureg *ur) /* called from trap() */
+{
+ ulong index, addr;
+ KMap *k, *f;
+
+ addr = ur->badvaddr;
+ index = (addr & ~KSEGM) >> KMAPSHIFT;
+ if(index >= KPTESIZE)
+ panic("kmapfault: va=%#lux", addr);
+
+ k = &kpte[index];
+ if(k->virt == 0)
+ panic("kmapfault: unmapped %#lux", addr);
+
+ for(f = m->kactive; f; f = f->konmach[m->machno])
+ if(f == k)
+ break;
+ if(f == 0) {
+ incref(k);
+ k->konmach[m->machno] = m->kactive;
+ m->kactive = k;
+ }
+ putktlb(k);
+}
+
+void
+kmapinval(void)
+{
+ int mno, i, curpid;
+ KMap *k, *next;
+ uchar *ktlbx;
+
+ if(m->machno < nelem(ktime))
+ ktime[m->machno] = MACHP(0)->ticks;
+ if(m->kactive == 0)
+ return;
+
+ curpid = PTEPID(TLBPID(tlbvirt()));
+ ktlbx = m->ktlbx;
+ for(i = 0; i < NTLB; i++, ktlbx++){
+ if(*ktlbx == 0)
+ continue;
+ TLBINVAL(i, curpid);
+ *ktlbx = 0;
+ }
+
+ mno = m->machno;
+ for(k = m->kactive; k; k = next) {
+ next = k->konmach[mno];
+ kunmap(k);
+ }
+
+ m->kactive = 0;
+ m->ktlbnext = TLBOFF;
+}
+
+/*
+ * Process must be splhi
+ */
+static int
+newtlbpid(Proc *p)
+{
+ int i, s;
+ Proc **h;
+
+ i = m->lastpid;
+ h = m->pidproc;
+ for(s = 0; s < NTLBPID; s++) {
+ i++;
+ if(i >= NTLBPID)
+ i = 1;
+ if(h[i] == 0)
+ break;
+ }
+
+ if(h[i])
+ purgetlb(i);
+ if(h[i] != 0)
+ panic("newtlb");
+
+ m->pidproc[i] = p;
+ p->pidonmach[m->machno] = i;
+ m->lastpid = i;
+
+ return i;
+}
+
+void
+mmuswitch(Proc *p)
+{
+ int tp;
+ static char lasttext[32];
+
+ if(Debugswitch && !p->kp){
+ if(strncmp(lasttext, p->text, sizeof lasttext) != 0)
+ iprint("[%s]", p->text);
+ strncpy(lasttext, p->text, sizeof lasttext);
+ }
+
+ if(p->newtlb) {
+ memset(p->pidonmach, 0, sizeof p->pidonmach);
+ p->newtlb = 0;
+ }
+ tp = p->pidonmach[m->machno];
+ if(tp == 0)
+ tp = newtlbpid(p);
+ puttlbx(0, KZERO|PTEPID(tp), 0, 0, PGSZ);
+}
+
+void
+mmurelease(Proc *p)
+{
+ memset(p->pidonmach, 0, sizeof p->pidonmach);
+}
+
+
+/* tlbvirt also has TLBPID() in its low byte as the asid */
+static Softtlb*
+putstlb(ulong tlbvirt, ulong tlbphys)
+{
+ int odd;
+ Softtlb *entry;
+
+ /* identical calculation in l.s/utlbmiss */
+ entry = &m->stb[stlbhash(tlbvirt)];
+ odd = tlbvirt & BY2PG; /* even/odd bit */
+ tlbvirt &= ~BY2PG; /* zero even/odd bit */
+ if(entry->virt != tlbvirt) { /* not my entry? overwrite it */
+ if(entry->virt != 0) {
+ m->hashcoll++;
+ if (Debughash)
+ iprint("putstlb: hash collision: %#lx old virt "
+ "%#lux new virt %#lux page %#lux\n",
+ entry - m->stb, entry->virt, tlbvirt,
+ tlbvirt >> (PGSHIFT+1));
+ }
+ entry->virt = tlbvirt;
+ entry->phys0 = 0;
+ entry->phys1 = 0;
+ }
+
+ if(odd)
+ entry->phys1 = tlbphys;
+ else
+ entry->phys0 = tlbphys;
+
+ if(entry->phys0 == 0 && entry->phys1 == 0)
+ entry->virt = 0;
+
+ return entry;
+}
+
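+/*
+ * install a user mapping: record the pte in the soft tlb and load it
+ * into the hardware tlb, replacing a random entry if the virtual
+ * address isn't already present; freshly mapped text pages get their
+ * icache flushed.
+ */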
+void
+putmmu(ulong tlbvirt, ulong tlbphys, Page *pg)
+{
+ short tp;
+ ulong tlbent[3];
+ Softtlb *entry;
+ int s, x;
+
+ s = splhi();
+ tp = up->pidonmach[m->machno];
+ if(tp == 0)
+ tp = newtlbpid(up);
+
+ tlbvirt |= PTEPID(tp);
+ if((tlbphys & PTEALGMASK) != PTEUNCACHED) {
+ tlbphys &= ~PTEALGMASK;
+ tlbphys |= PTECACHABILITY;
+ }
+
+ entry = putstlb(tlbvirt, tlbphys);
+ x = gettlbp(tlbvirt, tlbent);
+	if(x < 0)
+		x = getrandom();
+ puttlbx(x, entry->virt, entry->phys0, entry->phys1, PGSZ);
+ if(pg->txtflush & (1<<m->machno)){
+ icflush((void*)pg->va, BY2PG);
+ pg->txtflush &= ~(1<<m->machno);
+ }
+ splx(s);
+}
+
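+/*
+ * free a tlb pid for reuse: drop stale pid-to-proc bindings, then
+ * clear soft-tlb and hardware tlb entries owned by pids that no
+ * longer belong to any process.
+ */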
+void
+purgetlb(int pid)
+{
+ int i, mno;
+ Proc *sp, **pidproc;
+ Softtlb *entry, *etab;
+
+ m->tlbpurge++;
+
+ /*
+ * find all pid entries that are no longer used by processes
+ */
+ mno = m->machno;
+ pidproc = m->pidproc;
+ for(i=1; i<NTLBPID; i++) {
+ sp = pidproc[i];
+ if(sp && sp->pidonmach[mno] != i)
+ pidproc[i] = 0;
+ }
+
+ /*
+ * shoot down the one we want
+ */
+ sp = pidproc[pid];
+ if(sp != 0)
+ sp->pidonmach[mno] = 0;
+ pidproc[pid] = 0;
+
+ /*
+ * clean out all dead pids from the stlb;
+ */
+ entry = m->stb;
+ for(etab = &entry[STLBSIZE]; entry < etab; entry++)
+ if(pidproc[TLBPID(entry->virt)] == 0)
+ entry->virt = 0;
+
+ /*
+ * clean up the hardware
+ */
+ for(i=tlbroff; i<NTLB; i++)
+ if(pidproc[TLBPID(gettlbvirt(i))] == 0)
+ TLBINVAL(i, pid);
+}
+
+void
+flushmmu(void)
+{
+ int s;
+
+ s = splhi();
+ up->newtlb = 1;
+ mmuswitch(up);
+ splx(s);
+}
+
+void
+checkmmu(ulong, ulong)
+{
+}
+
+void
+countpagerefs(ulong*, int)
+{
+}
+
+/*
+ * Return the number of bytes that can be accessed via KADDR(pa).
+ * If pa is not a valid argument to KADDR, return 0.
+ */
+ulong
+cankaddr(ulong pa)
+{
+ if(pa >= KZERO)
+ return 0;
+ return -KZERO - pa;
+}
--- /dev/null
+++ b/sys/src/9/sgi/trap.c
@@ -1,0 +1,906 @@
+/*
+ * traps, exceptions, faults and interrupts on the sgi indy
+ */
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "ureg.h"
+#include "io.h"
+#include <tos.h>
+#include "../port/error.h"
+
+typedef struct Handler Handler;
+
+struct Handler {
+ void (*handler)(Ureg*, void *);
+ void *arg;
+ Handler *next; /* at this interrupt level */
+};
+
+int intr(Ureg*);
+void kernfault(Ureg*, int);
+void noted(Ureg*, ulong);
+void rfnote(Ureg**);
+
+char *excname[] =
+{
+ "trap: external interrupt",
+ "trap: TLB modification (store to unwritable)",
+ "trap: TLB miss (load or fetch)",
+ "trap: TLB miss (store)",
+ "trap: address error (load or fetch)",
+ "trap: address error (store)",
+ "trap: bus error (fetch)",
+ "trap: bus error (data load or store)",
+ "trap: system call",
+ "breakpoint",
+ "trap: reserved instruction",
+ "trap: coprocessor unusable",
+ "trap: arithmetic overflow",
+ "trap: TRAP exception",
+ "trap: VCE (instruction)",
+ "trap: floating-point exception",
+ "trap: coprocessor 2 implementation-specific", /* used as sys call for debugger */
+ "trap: corextend unusable",
+ "trap: precise coprocessor 2 exception",
+ "trap: TLB read-inhibit",
+ "trap: TLB execute-inhibit",
+ "trap: undefined 21",
+ "trap: undefined 22",
+ "trap: WATCH exception",
+	"trap: machine check",
+ "trap: undefined 25",
+ "trap: undefined 26",
+ "trap: undefined 27",
+ "trap: undefined 28",
+ "trap: undefined 29",
+ "trap: cache error",
+ "trap: VCE (data)",
+};
+
+char *fpcause[] =
+{
+ "inexact operation",
+ "underflow",
+ "overflow",
+ "division by zero",
+ "invalid operation",
+};
+char *fpexcname(Ureg*, ulong, char*, uint);
+#define FPEXPMASK (0x3f<<12) /* Floating exception bits in fcr31 */
+
+struct {
+ char *name;
+ uint off;
+} regname[] = {
+ "STATUS", offsetof(Ureg, status),
+ "PC", offsetof(Ureg, pc),
+ "SP", offsetof(Ureg, sp),
+	"CAUSE",	offsetof(Ureg, cause),
+ "BADADDR", offsetof(Ureg, badvaddr),
+ "TLBVIRT", offsetof(Ureg, tlbvirt),
+ "HI", offsetof(Ureg, hi),
+ "LO", offsetof(Ureg, lo),
+ "R31", offsetof(Ureg, r31),
+ "R30", offsetof(Ureg, r30),
+ "R28", offsetof(Ureg, r28),
+ "R27", offsetof(Ureg, r27),
+ "R26", offsetof(Ureg, r26),
+ "R25", offsetof(Ureg, r25),
+ "R24", offsetof(Ureg, r24),
+ "R23", offsetof(Ureg, r23),
+ "R22", offsetof(Ureg, r22),
+ "R21", offsetof(Ureg, r21),
+ "R20", offsetof(Ureg, r20),
+ "R19", offsetof(Ureg, r19),
+ "R18", offsetof(Ureg, r18),
+ "R17", offsetof(Ureg, r17),
+ "R16", offsetof(Ureg, r16),
+ "R15", offsetof(Ureg, r15),
+ "R14", offsetof(Ureg, r14),
+ "R13", offsetof(Ureg, r13),
+ "R12", offsetof(Ureg, r12),
+ "R11", offsetof(Ureg, r11),
+ "R10", offsetof(Ureg, r10),
+ "R9", offsetof(Ureg, r9),
+ "R8", offsetof(Ureg, r8),
+ "R7", offsetof(Ureg, r7),
+ "R6", offsetof(Ureg, r6),
+ "R5", offsetof(Ureg, r5),
+ "R4", offsetof(Ureg, r4),
+ "R3", offsetof(Ureg, r3),
+ "R2", offsetof(Ureg, r2),
+ "R1", offsetof(Ureg, r1),
+};
+
+static Handler handlers[8];
+
+void
+kvce(Ureg *ur, int ecode)
+{
+ char c;
+ Pte **p;
+ Page **pg;
+ Segment *s;
+ ulong addr, soff;
+
+ c = 'D';
+ if(ecode == CVCEI)
+ c = 'I';
+ print("Trap: VCE%c: addr=%#lux\n", c, ur->badvaddr);
+ if(up && !(ur->badvaddr & KSEGM)) {
+ addr = ur->badvaddr;
+ s = seg(up, addr, 0);
+ if(s == nil){
+ print("kvce: no seg for %#lux\n", addr);
+ for(;;);
+ }
+ addr &= ~(BY2PG-1);
+ soff = addr - s->base;
+ p = &s->map[soff/PTEMAPMEM];
+ if(*p){
+ pg = &(*p)->pages[(soff&(PTEMAPMEM-1))/BY2PG];
+ if(*pg)
+ print("kvce: pa=%#lux, va=%#lux\n",
+ (*pg)->pa, (*pg)->va);
+ else
+ print("kvce: no *pg\n");
+ }else
+ print("kvce: no *p\n");
+ }
+}
+
+/* prepare to go to user space */
+void
+kexit(Ureg *ur)
+{
+ Tos *tos;
+
+ /* replicate fpstate to ureg status */
+ if(up->fpstate != FPactive)
+ ur->status &= ~CU1;
+
+ /* precise time accounting, kernel exit */
+ tos = (Tos*)(USTKTOP-sizeof(Tos));
+ tos->kcycles += fastticks(&tos->cyclefreq) - up->kentry;
+ tos->pcycles = up->pcycles;
+ tos->pid = up->pid;
+}
+
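+/*
+ * common handler for everything except utlb misses and system calls:
+ * dispatches interrupts, tlb faults and fp traps, posts notes for
+ * user-mode faults, and panics on unexpected kernel exceptions.
+ */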
+void
+trap(Ureg *ur)
+{
+ int ecode, clockintr, user, cop, x, fpchk;
+ ulong fpfcr31;
+ char buf[2*ERRMAX], buf1[ERRMAX], *fpexcep;
+ static int dumps;
+
+ if (up && (char *)(ur) - up->kstack < 1024 && dumps++ == 0) {
+ iprint("trap: proc %ld kernel stack getting full\n", up->pid);
+ dumpregs(ur);
+ dumpstack();
+ for(;;);
+ }
+ if (up == nil &&
+ (char *)(ur) - (char *)m->stack < 1024 && dumps++ == 0) {
+ iprint("trap: cpu%d kernel stack getting full\n", m->machno);
+ dumpregs(ur);
+ dumpstack();
+ for(;;);
+ }
+
+ ecode = (ur->cause>>2)&EXCMASK;
+ user = userureg(ur);
+ if (ur->cause & TS)
+ panic("trap: tlb shutdown");
+
+ fpchk = 0;
+ if(user){
+ up->dbgreg = ur;
+ cycles(&up->kentry);
+ }
+
+ clockintr = 0;
+ switch(ecode){
+ case CINT:
+ clockintr = intr(ur);
+ break;
+
+ case CFPE:
+ if(!user)
+ goto Default;
+ if(up->fpstate == FPactive){
+ savefpregs(&up->fpsave);
+ up->fpstate = FPinactive;
+ }
+ clrfpintr();
+ fptrap(ur);
+ fpchk = 1;
+ break;
+
+ case CTLBM:
+ case CTLBL:
+ case CTLBS:
+ if(up == nil || !user && (ur->badvaddr & KSEGM) == KSEG3) {
+ kfault(ur);
+ break;
+ }
+ x = up->insyscall;
+ up->insyscall = 1;
+ spllo();
+ faultmips(ur, user, ecode);
+ up->insyscall = x;
+ break;
+
+ case CVCEI:
+ case CVCED:
+ kvce(ur, ecode);
+ goto Default;
+
+ case CWATCH:
+ if(!user)
+ panic("watchpoint trap from kernel mode pc=%#p",
+ ur->pc);
+ // fpwatch(ur);
+ break;
+
+ case CCPU:
+ cop = (ur->cause>>28)&3;
+ if(user && up && cop == 1) {
+ if(up->fpstate & FPillegal) {
+ /* someone used floating point in a note handler */
+ postnote(up, 1,
+ "sys: floating point in note handler",
+ NDebug);
+ break;
+ }
+ if(up->fpstate == FPinit || up->fpstate == FPinactive){
+ restfpregs(&up->fpsave, up->fpsave.fpstatus&~FPEXPMASK);
+ up->fpstate = FPactive;
+ ur->status |= CU1;
+ break;
+ }
+ fpchk = 1;
+ break;
+ }
+ /* Fallthrough */
+
+ Default:
+ default:
+ if(user) {
+ spllo();
+ snprint(buf, sizeof buf, "sys: %s", excname[ecode]);
+ postnote(up, 1, buf, NDebug);
+ break;
+ }
+ if (ecode == CADREL || ecode == CADRES)
+ iprint("kernel addr exception for va %#p pid %#ld %s\n",
+ ur->badvaddr, (up? up->pid: 0),
+ (up? up->text: ""));
+ print("cpu%d: kernel %s pc=%#lux\n",
+ m->machno, excname[ecode], ur->pc);
+ dumpregs(ur);
+ dumpstack();
+ if(m->machno == 0)
+ spllo();
+ exit(1);
+ }
+
+ if(fpchk) {
+ fpfcr31 = up->fpsave.fpstatus;
+ if((fpfcr31>>12) & ((fpfcr31>>7)|0x20) & 0x3f) {
+ spllo();
+ fpexcep = fpexcname(ur, fpfcr31, buf1, sizeof buf1);
+ snprint(buf, sizeof buf, "sys: fp: %s", fpexcep);
+ postnote(up, 1, buf, NDebug);
+ }
+ }
+
+ splhi();
+
+ /* delaysched set because we held a lock or because our quantum ended */
+ if(up && up->delaysched && clockintr){
+ sched();
+ splhi();
+ }
+
+ if(user){
+ notify(ur);
+ kexit(ur);
+ }
+}
+
+/* map HPC3 irq to INTR2 */
+int
+hpc3irqlevel(int irq)
+{
+ *IO(uchar, LIO_0_MASK) |= 1 << (irq & 7);
+ return 2 + irq/8;
+}
+
+/*
+ * set handlers
+ */
+void
+intrenable(int level, void (*h)(Ureg*, void *), void *arg)
+{
+ Handler *hp;
+
+ hp = &handlers[level];
+ if (hp->handler != nil) { /* occupied? */
+ /* add a new one at the end of the chain */
+ for (; hp->next != nil; hp = hp->next)
+ ;
+ if((hp->next = xalloc(sizeof *hp)) == nil)
+ panic("intrenable: out of memory");
+ hp = hp->next;
+ }
+ hp->arg = arg;
+ hp->handler = h;
+
+ intron(INTR0 << level);
+}
+
+int
+intr(Ureg *ur)
+{
+ ulong cause, mask;
+ int clockintr;
+ Handler *hh, *hp;
+
+ m->intr++;
+ clockintr = 0;
+ /*
+ * ignore interrupts that we have disabled, even if their cause bits
+ * are set.
+ */
+ cause = ur->cause & ur->status & INTMASK;
+ cause &= ~(INTR1|INTR0); /* ignore sw interrupts */
+ if(cause & INTR7){
+ clock(ur);
+ cause &= ~INTR7;
+ clockintr = 1;
+ }
+ hh = &handlers[2];
+ for(mask = INTR2; cause != 0 && mask < INTR7; mask <<= 1){
+ if(cause & mask){
+ for(hp = hh; hp != nil; hp = hp->next){
+ if(hp->handler != nil){
+ (*hp->handler)(ur, hp->arg);
+ cause &= ~mask;
+ }
+ }
+ }
+ hh++;
+ }
+ if(cause != 0)
+ iprint("unhandled interrupts %lux\n", cause);
+
+ /* preemptive scheduling */
+ if(up != nil && !clockintr)
+ preempted();
+ /* if it was a clockintr, sched will be called at end of trap() */
+ return clockintr;
+}
+
+char*
+fpexcname(Ureg *ur, ulong fcr31, char *buf, uint size)
+{
+ int i;
+ char *s;
+ ulong fppc;
+
+ fppc = ur->pc;
+ if(ur->cause & BD) /* branch delay */
+ fppc += 4;
+ s = 0;
+ if(fcr31 & (1<<17))
+ s = "unimplemented operation";
+ else{
+ fcr31 >>= 7; /* trap enable bits */
+ fcr31 &= (fcr31>>5); /* anded with exceptions */
+ for(i=0; i<5; i++)
+ if(fcr31 & (1<<i))
+ s = fpcause[i];
+ }
+
+ if(s == 0)
+ return "no floating point exception";
+
+ snprint(buf, size, "%s fppc=%#lux", s, fppc);
+ return buf;
+}
+
+static void
+getpcsp(ulong *pc, ulong *sp)
+{
+ *pc = getcallerpc(&pc);
+ *sp = (ulong)&pc-4;
+}
+
+void
+callwithureg(void (*fn)(Ureg*))
+{
+ Ureg ureg;
+
+ memset(&ureg, 0, sizeof ureg);
+ getpcsp((ulong*)&ureg.pc, (ulong*)&ureg.sp);
+ ureg.r31 = getcallerpc(&fn);
+ fn(&ureg);
+}
+
+static void
+_dumpstack(Ureg *ureg)
+{
+ ulong l, v, top, i;
+ extern ulong etext;
+
+ print("ktrace /kernel/path %.8lux %.8lux %.8lux\n",
+ ureg->pc, ureg->sp, ureg->r31);
+ if(up == nil)
+ top = (ulong)MACHADDR + MACHSIZE;
+ else
+ top = (ulong)up->kstack + KSTACK;
+ i = 0;
+ for(l=ureg->sp; l < top; l += BY2WD) {
+ v = *(ulong*)l;
+ if(KTZERO < v && v < (ulong)&etext) {
+ print("%.8lux=%.8lux ", l, v);
+ if((++i%4) == 0){
+ print("\n");
+ delay(200);
+ }
+ }
+ }
+ print("\n");
+}
+
+void
+dumpstack(void)
+{
+ callwithureg(_dumpstack);
+}
+
+static ulong
+R(Ureg *ur, int i)
+{
+ uchar *s;
+
+ s = (uchar*)ur;
+ return *(ulong*)(s + regname[i].off);
+}
+
+void
+dumpregs(Ureg *ur)
+{
+ int i;
+
+ if(up)
+ print("registers for %s %lud\n", up->text, up->pid);
+ else
+ print("registers for kernel\n");
+
+ for(i = 0; i < nelem(regname); i += 2)
+ print("%s\t%#.8lux\t%s\t%#.8lux\n",
+ regname[i].name, R(ur, i),
+ regname[i+1].name, R(ur, i+1));
+}
+
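+/*
+ * if a note is pending, push the saved Ureg and the note text onto the
+ * user stack and redirect the user pc to the process's note handler;
+ * returns 1 when a handler will run.
+ */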
+int
+notify(Ureg *ur)
+{
+ int l, s;
+ ulong sp;
+ Note *n;
+
+ if(up->procctl)
+ procctl();
+ if(up->nnote == 0)
+ return 0;
+
+ if(up->fpstate == FPactive){
+ savefpregs(&up->fpsave);
+ up->fpstate = FPinactive;
+ }
+ up->fpstate |= FPillegal;
+
+ s = spllo();
+ qlock(&up->debug);
+ up->notepending = 0;
+ n = &up->note[0];
+ if(strncmp(n->msg, "sys:", 4) == 0) {
+ l = strlen(n->msg);
+ if(l > ERRMAX-15) /* " pc=0x12345678\0" */
+ l = ERRMAX-15;
+
+ seprint(n->msg+l, &n->msg[sizeof n->msg], " pc=%#lux", ur->pc);
+ }
+
+ if(n->flag != NUser && (up->notified || up->notify==0)) {
+ if(n->flag == NDebug)
+ pprint("suicide: %s\n", n->msg);
+
+ qunlock(&up->debug);
+ pexit(n->msg, n->flag!=NDebug);
+ }
+
+ if(up->notified) {
+ qunlock(&up->debug);
+ splx(s);
+ return 0;
+ }
+
+ if(!up->notify) {
+ qunlock(&up->debug);
+ pexit(n->msg, n->flag!=NDebug);
+ }
+ sp = ur->usp & ~(BY2V-1);
+ sp -= sizeof(Ureg);
+
+ if(!okaddr((ulong)up->notify, BY2WD, 0) ||
+ !okaddr(sp-ERRMAX-4*BY2WD, sizeof(Ureg)+ERRMAX+4*BY2WD, 1)) {
+ pprint("suicide: bad address or sp in notify\n");
+ qunlock(&up->debug);
+ pexit("Suicide", 0);
+ }
+
+ memmove((Ureg*)sp, ur, sizeof(Ureg)); /* push user regs */
+ *(Ureg**)(sp-BY2WD) = up->ureg; /* word under Ureg is old up->ureg */
+ up->ureg = (void*)sp;
+
+ sp -= BY2WD+ERRMAX;
+ memmove((char*)sp, up->note[0].msg, ERRMAX); /* push err string */
+
+ sp -= 3*BY2WD;
+ *(ulong*)(sp+2*BY2WD) = sp+3*BY2WD; /* arg 2 is string */
+ ur->r1 = (long)up->ureg; /* arg 1 is ureg* */
+ ((ulong*)sp)[1] = (ulong)up->ureg; /* arg 1 0(FP) is ureg* */
+ ((ulong*)sp)[0] = 0; /* arg 0 is pc */
+ ur->usp = sp;
+ /*
+ * arrange to resume at user's handler as if handler(ureg, errstr)
+ * were being called.
+ */
+ ur->pc = (ulong)up->notify;
+
+ up->notified = 1;
+ up->nnote--;
+ memmove(&up->lastnote, &up->note[0], sizeof(Note));
+ memmove(&up->note[0], &up->note[1], up->nnote*sizeof(Note));
+
+ qunlock(&up->debug);
+ splx(s);
+ return 1;
+}
+
+/*
+ * Check that status is OK to return from note.
+ */
+int
+validstatus(ulong kstatus, ulong ustatus)
+{
+// if((kstatus & (INTMASK|KX|SX|UX)) != (ustatus & (INTMASK|KX|SX|UX)))
+ if((kstatus & INTMASK) != (ustatus & INTMASK))
+ return 0;
+ if((ustatus&(KSU|ERL|EXL|IE)) != (KUSER|EXL|IE))
+ return 0;
+ if(ustatus & (0xFFFF0000&~CU1)) /* no CU3, CU2, CU0, RP, FR, RE, DS */
+ return 0;
+ return 1;
+}
+
+/*
+ * Return user to state before notify(); called from user's handler.
+ */
+void
+noted(Ureg *kur, ulong arg0)
+{
+ Ureg *nur;
+ ulong oureg, sp;
+
+ qlock(&up->debug);
+ if(arg0!=NRSTR && !up->notified) {
+ qunlock(&up->debug);
+ pprint("call to noted() when not notified\n");
+ pexit("Suicide", 0);
+ }
+ up->notified = 0;
+
+ up->fpstate &= ~FPillegal;
+
+ nur = up->ureg;
+
+ oureg = (ulong)nur;
+ if((oureg & (BY2WD-1))
+ || !okaddr((ulong)oureg-BY2WD, BY2WD+sizeof(Ureg), 0)){
+ pprint("bad up->ureg in noted or call to noted() when not notified\n");
+ qunlock(&up->debug);
+ pexit("Suicide", 0);
+ }
+
+ if(0 && !validstatus(kur->status, nur->status)) {
+ qunlock(&up->debug);
+ pprint("bad noted ureg status %#lux\n", nur->status);
+ pexit("Suicide", 0);
+ }
+
+ memmove(kur, up->ureg, sizeof(Ureg));
+ switch(arg0) {
+ case NCONT:
+ case NRSTR: /* only used by APE */
+ if(!okaddr(nur->pc, BY2WD, 0) || !okaddr(nur->usp, BY2WD, 0)){
+ pprint("suicide: trap in noted\n");
+ qunlock(&up->debug);
+ pexit("Suicide", 0);
+ }
+ up->ureg = (Ureg*)(*(ulong*)(oureg-BY2WD));
+ qunlock(&up->debug);
+ splhi();
+ break;
+
+ case NSAVE: /* only used by APE */
+ if(!okaddr(nur->pc, BY2WD, 0) || !okaddr(nur->usp, BY2WD, 0)){
+ pprint("suicide: trap in noted\n");
+ qunlock(&up->debug);
+ pexit("Suicide", 0);
+ }
+ qunlock(&up->debug);
+ sp = oureg-4*BY2WD-ERRMAX;
+ splhi();
+ kur->sp = sp;
+ kur->r1 = oureg; /* arg 1 is ureg* */
+ ((ulong*)sp)[1] = oureg; /* arg 1 0(FP) is ureg* */
+ ((ulong*)sp)[0] = 0; /* arg 0 is pc */
+ break;
+
+ default:
+ pprint("unknown noted arg %#lux\n", arg0);
+ up->lastnote.flag = NDebug;
+ /* fall through */
+
+ case NDFLT:
+ if(up->lastnote.flag == NDebug)
+ pprint("suicide: %s\n", up->lastnote.msg);
+ qunlock(&up->debug);
+ pexit(up->lastnote.msg, up->lastnote.flag!=NDebug);
+ }
+}
+
+#include "../port/systab.h"
+
+static void
+sctracesetup(ulong scallnr, ulong sp, uintptr pc, vlong *startnsp)
+{
+ if(up->procctl == Proc_tracesyscall){
+ /*
+ * Redundant validaddr. Do we care?
+ * Tracing syscalls is not exactly a fast path...
+ * Beware, validaddr currently does a pexit rather
+ * than an error if there's a problem; that might
+ * change in the future.
+ */
+ if(sp < (USTKTOP-BY2PG) || sp > (USTKTOP-sizeof(Sargs)-BY2WD))
+ validaddr(sp, sizeof(Sargs)+BY2WD, 0);
+
+ syscallfmt(scallnr, pc, (va_list)(sp+BY2WD));
+ up->procctl = Proc_stopme;
+ procctl();
+ if(up->syscalltrace)
+ free(up->syscalltrace);
+ up->syscalltrace = nil;
+ *startnsp = todget(nil);
+ }
+}
+
+static void
+sctracefinish(ulong scallnr, ulong sp, int ret, vlong startns)
+{
+ int s;
+
+ if(up->procctl == Proc_tracesyscall){
+ up->procctl = Proc_stopme;
+ sysretfmt(scallnr, (va_list)(sp+BY2WD), ret,
+ startns, todget(nil));
+ s = splhi();
+ procctl();
+ splx(s);
+ if(up->syscalltrace)
+ free(up->syscalltrace);
+ up->syscalltrace = nil;
+ }
+}
+
+/*
+ * called directly from assembler, not via trap()
+ */
+void
+syscall(Ureg *ur)
+{
+ int i;
+ volatile long ret;
+ ulong sp, scallnr;
+ vlong startns;
+ char *e;
+
+ cycles(&up->kentry);
+
+ m->syscall++;
+ up->insyscall = 1;
+ up->pc = ur->pc;
+ up->dbgreg = ur;
+ ur->cause = 16<<2; /* for debugging: system call is undef 16 */
+
+ scallnr = ur->r1;
+ up->scallnr = ur->r1;
+ sp = ur->sp;
+ sctracesetup(scallnr, sp, ur->pc, &startns);
+
+	/* the kernel doesn't touch the fpu, so no fp state needs saving here */
+ spllo();
+
+ up->nerrlab = 0;
+ ret = -1;
+ if(!waserror()) {
+ if(scallnr >= nsyscall || systab[scallnr] == 0){
+ pprint("bad sys call number %ld pc %#lux\n",
+ scallnr, ur->pc);
+ postnote(up, 1, "sys: bad sys call", NDebug);
+ error(Ebadarg);
+ }
+
+ if(sp & (BY2WD-1)){
+ pprint("odd sp in sys call pc %#lux sp %#lux\n",
+ ur->pc, ur->sp);
+ postnote(up, 1, "sys: odd stack", NDebug);
+ error(Ebadarg);
+ }
+
+ if(sp<(USTKTOP-BY2PG) || sp>(USTKTOP-sizeof(Sargs)-BY2WD))
+ validaddr(sp, sizeof(Sargs)+BY2WD, 0);
+
+ up->s = *((Sargs*)(sp+BY2WD));
+ up->psstate = sysctab[scallnr];
+
+ ret = systab[scallnr]((va_list)up->s.args);
+ poperror();
+ }else{
+ /* failure: save the error buffer for errstr */
+ e = up->syserrstr;
+ up->syserrstr = up->errstr;
+ up->errstr = e;
+ if(0 && up->pid == 1)
+ print("[%lud %s] syscall %lud: %s\n",
+ up->pid, up->text, scallnr, up->errstr);
+ }
+ if(up->nerrlab){
+ print("bad errstack [%lud]: %d extra\n", scallnr, up->nerrlab);
+ for(i = 0; i < NERR; i++)
+ print("sp=%#lux pc=%#lux\n",
+ up->errlab[i].sp, up->errlab[i].pc);
+ panic("error stack");
+ }
+ sctracefinish(scallnr, sp, ret, startns);
+
+ ur->pc += 4;
+ ur->r1 = ret;
+
+ up->psstate = 0;
+ up->insyscall = 0;
+
+ if(scallnr == NOTED) /* ugly hack */
+ noted(ur, *(ulong*)(sp+BY2WD)); /* may return */
+ splhi();
+ if(scallnr!=RFORK && (up->procctl || up->nnote))
+ notify(ur);
+ /* if we delayed sched because we held a lock, sched now */
+ if(up->delaysched)
+ sched();
+ kexit(ur);
+}
+
+void
+forkchild(Proc *p, Ureg *ur)
+{
+ Ureg *cur;
+
+ p->sched.sp = (ulong)p->kstack+KSTACK-UREGSIZE;
+ p->sched.pc = (ulong)forkret;
+
+ cur = (Ureg*)(p->sched.sp+2*BY2WD);
+ memmove(cur, ur, sizeof(Ureg));
+
+ cur->status &= ~CU1; /* FPU off when returning */
+
+ cur->r1 = 0;
+ cur->pc += 4;
+
+ /* Things from bottom of syscall we never got to execute */
+ p->psstate = 0;
+ p->insyscall = 0;
+}
+
+static
+void
+linkproc(void)
+{
+ spllo();
+ up->kpfun(up->kparg);
+ pexit("kproc exiting", 0);
+}
+
+void
+kprocchild(Proc *p, void (*func)(void*), void *arg)
+{
+ p->sched.pc = (ulong)linkproc;
+ p->sched.sp = (ulong)p->kstack+KSTACK;
+
+ p->kpfun = func;
+ p->kparg = arg;
+}
+
+/* set up user registers before return from exec() */
+uintptr
+execregs(ulong entry, ulong ssize, ulong nargs)
+{
+ Ureg *ur;
+ ulong *sp;
+
+ sp = (ulong*)(USTKTOP - ssize);
+ *--sp = nargs;
+
+ ur = (Ureg*)up->dbgreg;
+ ur->usp = (ulong)sp;
+ ur->pc = entry - 4; /* syscall advances it */
+ return USTKTOP-sizeof(Tos); /* address of kernel/user shared data */
+}
+
+ulong
+userpc(void)
+{
+ Ureg *ur;
+
+ ur = (Ureg*)up->dbgreg;
+ return ur->pc;
+}
+
+/*
+ * This routine must save the values of registers the user is not
+ * permitted to write from devproc and then restore the saved values
+ * before returning
+ */
+void
+setregisters(Ureg *xp, char *pureg, char *uva, int n)
+{
+ ulong status;
+
+ status = xp->status;
+ memmove(pureg, uva, n);
+ xp->status = status;
+}
+
+/*
+ * Give enough context in the ureg to produce a kernel stack for
+ * a sleeping process
+ */
+void
+setkernur(Ureg *xp, Proc *p)
+{
+ xp->pc = p->sched.pc;
+ xp->sp = p->sched.sp;
+ xp->r24 = (ulong)p; /* up */
+ xp->r31 = (ulong)sched;
+}
+
+ulong
+dbgpc(Proc *p)
+{
+ Ureg *ur;
+
+ ur = p->dbgreg;
+ if(ur == 0)
+ return 0;
+
+ return ur->pc;
+}
--- /dev/null
+++ b/sys/src/9/sgi/uartarcs.c
@@ -1,0 +1,208 @@
+/*
+ * ARCS console.
+ */
+
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "../port/error.h"
+#include "io.h"
+
+extern PhysUart arcsphysuart;
+
+static Uart arcsuart = {
+ .name = "arcs",
+ .freq = 1843200,
+ .phys = &arcsphysuart,
+};
+
+static Lock arcslock;
+
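+/* write one character to the console through the ARCS firmware write call (0x6c) */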
+void
+arcsputc(char c)
+{
+ int r;
+
+ r = 0;
+ ilock(&arcslock);
+ arcs(0x6c, 1, &c, 1, &r);
+ iunlock(&arcslock);
+}
+
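+/*
+ * poll for console input: arcs call 0x68 reports whether a byte is
+ * ready and call 0x64 reads it; returns -1 when nothing is pending.
+ */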
+int
+arcsgetc(void)
+{
+ int c, r;
+ uchar b;
+
+ r = 0;
+ c = -1;
+ ilock(&arcslock);
+ if(arcs(0x68, 0) == 0)
+ if(arcs(0x64, 0, &b, 1, &r) == 0)
+ if(r == 1)
+ c = b;
+ iunlock(&arcslock);
+ return c;
+}
+
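+/*
+ * kernel process that polls the ARCS console every 50ms and feeds
+ * any input to the uart layer.
+ */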
+void
+arcsproc(void*)
+{
+ int c;
+
+ while(waserror())
+ ;
+ for(;;){
+ //sched();
+ tsleep(&up->sleep, return0, nil, 50);
+ c = arcsgetc();
+ if(c < 0)
+ continue;
+ uartrecv(&arcsuart, c);
+ }
+}
+
+/*
+ * Send queued output to console
+ */
+static void
+kick(Uart *uart)
+{
+ int n;
+
+ for(n=0; uart->op < uart->oe || uartstageoutput(uart) > 0; uart->op += n){
+ n = uart->oe - uart->op;
+ if(n <= 0 || !canlock(&arcslock))
+ break;
+ if(arcs(0x6c, 1, uart->op, n, &n) != 0)
+ n = -1;
+ unlock(&arcslock);
+ if(n <= 0)
+ break;
+ }
+}
+
+static void
+interrupt(Ureg*, void *)
+{
+}
+
+static Uart*
+pnp(void)
+{
+ return &arcsuart;
+}
+
+static void
+enable(Uart*, int)
+{
+}
+
+static void
+disable(Uart*)
+{
+}
+
+static void
+donothing(Uart*, int)
+{
+}
+
+static int
+donothingint(Uart*, int)
+{
+ return 0;
+}
+
+static int
+baud(Uart *uart, int n)
+{
+ if(n <= 0)
+ return -1;
+
+ uart->baud = n;
+ return 0;
+}
+
+static int
+bits(Uart *uart, int n)
+{
+ switch(n){
+ case 7:
+ case 8:
+ break;
+ default:
+ return -1;
+ }
+
+ uart->bits = n;
+ return 0;
+}
+
+static int
+stop(Uart *uart, int n)
+{
+ if(n != 1)
+ return -1;
+ uart->stop = n;
+ return 0;
+}
+
+static int
+parity(Uart *uart, int n)
+{
+ if(n != 'n')
+ return -1;
+ uart->parity = n;
+ return 0;
+}
+
+static long
+status(Uart *, void *, long, long)
+{
+ return 0;
+}
+
+void
+uartarcsputc(Uart*, int c)
+{
+ arcsputc(c);
+}
+
+int
+uartarcsgetc(Uart*)
+{
+ return arcsgetc();
+}
+
+PhysUart arcsphysuart = {
+ .name = "arcsuart",
+
+ .pnp = pnp,
+ .enable = enable,
+ .disable = disable,
+ .kick = kick,
+ .dobreak = donothing,
+ .baud = baud,
+ .bits = bits,
+ .stop = stop,
+ .parity = parity,
+ .modemctl = donothing,
+ .rts = donothing,
+ .dtr = donothing,
+ .status = status,
+ .fifo = donothing,
+
+ .getc = uartarcsgetc,
+ .putc = uartarcsputc,
+};
+
+void
+arcsconsinit(void)
+{
+ consuart = &arcsuart;
+ consuart->console = 1;
+}