ref: 015180f99be4553dc4851f63edff04d0fc16a66a
parent: 515f4d59b1b865bc25d2e139d545c24d83cb4212
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Sun Oct 29 13:23:23 EDT 2023
imx8: share generic arm64 mmu.c Split layout specific code into mem.c from mmu.c, so generic mmu code can be shared.
--- a/sys/src/9/arm64/fns.h
+++ b/sys/src/9/arm64/fns.h
@@ -69,18 +69,17 @@
#define VA(k) ((uintptr)(k))
extern KMap *kmap(Page*);
extern void kunmap(KMap*);
+extern void kmapram(uintptr, uintptr);
extern uintptr mmukmap(uintptr, uintptr, usize);
extern void* vmap(uvlong, vlong);
extern void vunmap(void*, vlong);
-
-extern void mmu0init(uintptr*);
-extern void mmuidmap(uintptr*);
extern void mmu1init(void);
-extern void meminit(void);
-
extern void putasid(Proc*);
-extern void* ucalloc(usize);
+/* mem */
+extern void mmuidmap(uintptr*);
+extern void mmu0init(uintptr*);
+extern void meminit(void);
/* clock */
extern void clockinit(void);
--- /dev/null
+++ b/sys/src/9/arm64/mem.c
@@ -1,0 +1,80 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "../arm64/sysreg.h"
+
+#define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
+
+/*
+ * Create initial identity map in top-level page table
+ * (L1BOT) for TTBR0. This page table is only used until
+ * mmu1init() loads m->mmutop.
+ */
+void
+mmuidmap(uintptr *l1bot)
+{
+ uintptr pa, pe, attr;
+
+ /* VDRAM */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
+ pe = -KZERO;
+ for(pa = VDRAM - KZERO; pa < pe; pa += PGLSZ(PTLEVELS-1))
+ l1bot[PTLX(pa, PTLEVELS-1)] = pa | PTEVALID | PTEBLOCK | attr;
+}
+
+/*
+ * Create initial shared kernel page table (L1) for TTBR1.
+ * This page table covers the INITMAP and VIRTIO,
+ * and later we fill the ram mappings in meminit().
+ */
+void
+mmu0init(uintptr *l1)
+{
+ uintptr va, pa, pe, attr;
+
+ /* DRAM - INITMAP */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
+ pe = INITMAP;
+ for(pa = VDRAM - KZERO, va = VDRAM; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
+ l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
+
+ /* VIRTIO */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
+ pe = PHYSIOEND;
+ for(pa = PHYSIO, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
+ if(((pa|va) & PGLSZ(1)-1) != 0){
+ l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
+ for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
+ assert(l1[PTLX(va, 0)] == 0);
+ l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
+ }
+ break;
+ }
+ l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
+ }
+
+ if(PTLEVELS > 2)
+ for(va = KSEG0; va != 0; va += PGLSZ(2))
+ l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
+
+ if(PTLEVELS > 3)
+ for(va = KSEG0; va != 0; va += PGLSZ(3))
+ l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
+}
+
+void
+meminit(void)
+{
+ char *p;
+
+ conf.mem[0].base = PGROUND((uintptr)end - KZERO);
+ conf.mem[0].limit = GiB + 128 * MiB;
+ if(p = getconf("*maxmem"))
+ conf.mem[0].limit = strtoull(p, 0, 0);
+
+ kmapram(conf.mem[0].base, conf.mem[0].limit);
+
+ conf.mem[0].npage = (conf.mem[0].limit - conf.mem[0].base)/BY2PG;
+}
--- a/sys/src/9/arm64/mkfile
+++ b/sys/src/9/arm64/mkfile
@@ -47,6 +47,7 @@
fpu.$O\
main.$O\
mmu.$O\
+ mem.$O\
sysreg.$O\
random.$O\
trap.$O\
--- a/sys/src/9/arm64/mmu.c
+++ b/sys/src/9/arm64/mmu.c
@@ -3,68 +3,10 @@
#include "mem.h"
#include "dat.h"
#include "fns.h"
-#include "sysreg.h"
#define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
-/*
- * Create initial identity map in top-level page table
- * (L1BOT) for TTBR0. This page table is only used until
- * mmu1init() loads m->mmutop.
- */
void
-mmuidmap(uintptr *l1bot)
-{
- uintptr pa, pe, attr;
-
- /* VDRAM */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
- pe = -KZERO;
- for(pa = VDRAM - KZERO; pa < pe; pa += PGLSZ(PTLEVELS-1))
- l1bot[PTLX(pa, PTLEVELS-1)] = pa | PTEVALID | PTEBLOCK | attr;
-}
-
-/*
- * Create initial shared kernel page table (L1) for TTBR1.
- * This page table coveres the INITMAP and VIRTIO,
- * and later we fill the ram mappings in meminit().
- */
-void
-mmu0init(uintptr *l1)
-{
- uintptr va, pa, pe, attr;
-
- /* DRAM - INITMAP */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
- pe = INITMAP;
- for(pa = VDRAM - KZERO, va = VDRAM; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
- l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
-
- /* VIRTIO */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
- pe = PHYSIOEND;
- for(pa = PHYSIO, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
- if(((pa|va) & PGLSZ(1)-1) != 0){
- l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
- for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
- assert(l1[PTLX(va, 0)] == 0);
- l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
- }
- break;
- }
- l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
- }
-
- if(PTLEVELS > 2)
- for(va = KSEG0; va != 0; va += PGLSZ(2))
- l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
-
- if(PTLEVELS > 3)
- for(va = KSEG0; va != 0; va += PGLSZ(3))
- l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
-}
-
-void
mmu1init(void)
{
m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
@@ -137,7 +79,6 @@
pa = conf.mem[0].base;
assert((pa % BY2PG) == 0);
- assert(pa < INITMAP);
conf.mem[0].base += BY2PG;
return KADDR(pa);
}
@@ -179,7 +120,7 @@
}
}
-static void
+void
kmapram(uintptr base, uintptr limit)
{
if(base < (uintptr)-KZERO && limit > (uintptr)-KZERO){
@@ -194,21 +135,6 @@
l1map((uintptr)kmapaddr(base), base, limit,
PTEWRITE | PTEPXN | PTEUXN | PTESH(SHARE_INNER));
-}
-
-void
-meminit(void)
-{
- char *p;
-
- conf.mem[0].base = PGROUND((uintptr)end - KZERO);
- conf.mem[0].limit = GiB + 128 * MiB;
- if(p = getconf("*maxmem"))
- conf.mem[0].limit = strtoull(p, 0, 0);
-
- kmapram(conf.mem[0].base, conf.mem[0].limit);
-
- conf.mem[0].npage = (conf.mem[0].limit - conf.mem[0].base)/BY2PG;
}
uintptr
--- a/sys/src/9/imx8/fns.h
+++ b/sys/src/9/imx8/fns.h
@@ -69,16 +69,17 @@
#define VA(k) ((uintptr)(k))
extern KMap *kmap(Page*);
extern void kunmap(KMap*);
+extern void kmapram(uintptr, uintptr);
extern uintptr mmukmap(uintptr, uintptr, usize);
extern void* vmap(uvlong, vlong);
extern void vunmap(void*, vlong);
+extern void mmu1init(void);
+extern void putasid(Proc*);
-extern void mmu0init(uintptr*);
+/* mem */
extern void mmuidmap(uintptr*);
-extern void mmu1init(void);
+extern void mmu0init(uintptr*);
extern void meminit(void);
-
-extern void putasid(Proc*);
extern void* ucalloc(usize);
--- /dev/null
+++ b/sys/src/9/imx8/mem.c
@@ -1,0 +1,117 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+
+#define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
+
+/*
+ * Create initial identity map in top-level page table
+ * (L1BOT) for TTBR0. This page table is only used until
+ * mmu1init() loads m->mmutop.
+ */
+void
+mmuidmap(uintptr *l1bot)
+{
+ uintptr pa, pe, attr;
+
+ /* VDRAM */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
+ pe = -KZERO;
+ for(pa = VDRAM - KZERO; pa < pe; pa += PGLSZ(PTLEVELS-1))
+ l1bot[PTLX(pa, PTLEVELS-1)] = pa | PTEVALID | PTEBLOCK | attr;
+}
+
+/*
+ * Create initial shared kernel page table (L1) for TTBR1.
+ * This page table covers the INITMAP and VIRTIO,
+ * and later we fill the ram mappings in meminit().
+ */
+void
+mmu0init(uintptr *l1)
+{
+ uintptr va, pa, pe, attr;
+
+ /* DRAM - INITMAP */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
+ pe = INITMAP;
+ for(pa = VDRAM - KZERO, va = VDRAM; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
+ l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
+
+ /* VIRTIO */
+ attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
+ pe = VDRAM - KZERO;
+ for(pa = VIRTIO - KZERO, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
+ if(((pa|va) & PGLSZ(1)-1) != 0){
+ l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
+ for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
+ assert(l1[PTLX(va, 0)] == 0);
+ l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
+ }
+ break;
+ }
+ l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
+ }
+
+ if(PTLEVELS > 2)
+ for(va = KSEG0; va != 0; va += PGLSZ(2))
+ l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
+
+ if(PTLEVELS > 3)
+ for(va = KSEG0; va != 0; va += PGLSZ(3))
+ l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
+}
+
+void
+meminit(void)
+{
+ /* DDR Memory (All modules) */
+ conf.mem[0].base = PGROUND((uintptr)end - KZERO);
+
+ /* exclude uncached dram for ucalloc() */
+ conf.mem[0].limit = UCRAMBASE;
+ conf.mem[1].base = UCRAMBASE+UCRAMSIZE;
+
+ conf.mem[1].limit = 0x100000000ULL;
+
+ /* DDR Memory (Quad-A53 only) */
+ conf.mem[2].base = 0x100000000ULL;
+ conf.mem[2].limit = 0x140000000ULL;
+
+ kmapram(conf.mem[0].base, conf.mem[0].limit);
+ kmapram(conf.mem[1].base, conf.mem[1].limit);
+ kmapram(conf.mem[2].base, conf.mem[2].limit);
+
+ conf.mem[0].npage = (conf.mem[0].limit - conf.mem[0].base)/BY2PG;
+ conf.mem[1].npage = (conf.mem[1].limit - conf.mem[1].base)/BY2PG;
+ conf.mem[2].npage = (conf.mem[2].limit - conf.mem[2].base)/BY2PG;
+}
+
+static void*
+ucramalloc(usize size, uintptr align, uint attr)
+{
+ static uintptr top = UCRAMBASE + UCRAMSIZE;
+ static Lock lk;
+ uintptr va, pg;
+
+ lock(&lk);
+ top -= size;
+ size += top & align-1;
+ top &= -align;
+ if(top < UCRAMBASE)
+ panic("ucramalloc: need %zd bytes", size);
+ va = KZERO + top;
+ pg = va & -BY2PG;
+ if(pg != ((va+size) & -BY2PG))
+ mmukmap(pg | attr, pg - KZERO, PGROUND(size));
+ unlock(&lk);
+
+ return (void*)va;
+}
+
+void*
+ucalloc(usize size)
+{
+ return ucramalloc(size, 8, PTEUNCACHED);
+}
--- a/sys/src/9/imx8/mkfile
+++ b/sys/src/9/imx8/mkfile
@@ -47,6 +47,7 @@
fpu.$O\
main.$O\
mmu.$O\
+ mem.$O\
sysreg.$O\
random.$O\
trap.$O\
@@ -102,7 +103,7 @@
pciimx.$O: ../port/pci.h
usbxhciimx.$O: ../port/usbxhci.h
-l.$O main.$O mmu.$O clock.$O gic.$O cache.v8.$O fpu.$O trap.$O rebootcode.$O: ../arm64/sysreg.h
+l.$O main.$O clock.$O gic.$O cache.v8.$O fpu.$O trap.$O rebootcode.$O: ../arm64/sysreg.h
initcode.out: init9.$O initcode.$O /$objtype/lib/libc.a
$LD -l -R1 -s -o $target $prereq
--- a/sys/src/9/imx8/mmu.c
+++ /dev/null
@@ -1,488 +1,0 @@
-#include "u.h"
-#include "../port/lib.h"
-#include "mem.h"
-#include "dat.h"
-#include "fns.h"
-#include "../arm64/sysreg.h"
-
-#define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
-
-/*
- * Create initial identity map in top-level page table
- * (L1BOT) for TTBR0. This page table is only used until
- * mmu1init() loads m->mmutop.
- */
-void
-mmuidmap(uintptr *l1bot)
-{
- uintptr pa, pe, attr;
-
- /* VDRAM */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
- pe = -KZERO;
- for(pa = VDRAM - KZERO; pa < pe; pa += PGLSZ(PTLEVELS-1))
- l1bot[PTLX(pa, PTLEVELS-1)] = pa | PTEVALID | PTEBLOCK | attr;
-}
-
-/*
- * Create initial shared kernel page table (L1) for TTBR1.
- * This page table coveres the INITMAP and VIRTIO,
- * and later we fill the ram mappings in meminit().
- */
-void
-mmu0init(uintptr *l1)
-{
- uintptr va, pa, pe, attr;
-
- /* DRAM - INITMAP */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
- pe = INITMAP;
- for(pa = VDRAM - KZERO, va = VDRAM; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
- l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
-
- /* VIRTIO */
- attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
- pe = VDRAM - KZERO;
- for(pa = VIRTIO - KZERO, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
- if(((pa|va) & PGLSZ(1)-1) != 0){
- l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
- for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
- assert(l1[PTLX(va, 0)] == 0);
- l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
- }
- break;
- }
- l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
- }
-
- if(PTLEVELS > 2)
- for(va = KSEG0; va != 0; va += PGLSZ(2))
- l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
-
- if(PTLEVELS > 3)
- for(va = KSEG0; va != 0; va += PGLSZ(3))
- l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
-}
-
-void
-mmu1init(void)
-{
- m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
- if(m->mmutop == nil)
- panic("mmu1init: no memory for mmutop");
- memset(m->mmutop, 0, L1TOPSIZE);
- mmuswitch(nil);
-}
-
-/* KZERO maps the first 1GB of ram */
-uintptr
-paddr(void *va)
-{
- if((uintptr)va >= KZERO)
- return (uintptr)va-KZERO;
- panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
- return 0;
-}
-
-uintptr
-cankaddr(uintptr pa)
-{
- if(pa < (uintptr)-KZERO)
- return -KZERO - pa;
- return 0;
-}
-
-void*
-kaddr(uintptr pa)
-{
- if(pa < (uintptr)-KZERO)
- return (void*)(pa + KZERO);
- panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
- return nil;
-}
-
-static void*
-kmapaddr(uintptr pa)
-{
- if(pa < (uintptr)-KZERO)
- return (void*)(pa + KZERO);
- if(pa < (VDRAM - KZERO) || pa >= (VDRAM - KZERO) + (KMAPEND - KMAP))
- panic("kmapaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
- return (void*)(pa + KMAP - (VDRAM - KZERO));
-}
-
-KMap*
-kmap(Page *p)
-{
- return kmapaddr(p->pa);
-}
-
-void
-kunmap(KMap*)
-{
-}
-
-void
-kmapinval(void)
-{
-}
-
-static void*
-rampage(void)
-{
- uintptr pa;
-
- if(conf.npage)
- return mallocalign(BY2PG, BY2PG, 0, 0);
-
- pa = conf.mem[0].base;
- assert((pa % BY2PG) == 0);
- assert(pa < INITMAP);
- conf.mem[0].base += BY2PG;
- return KADDR(pa);
-}
-
-static void
-l1map(uintptr va, uintptr pa, uintptr pe, uintptr attr)
-{
- uintptr *l1, *l0;
-
- assert(pa < pe);
-
- va &= -BY2PG;
- pa &= -BY2PG;
- pe = PGROUND(pe);
-
- attr |= PTEKERNEL | PTEAF;
-
- l1 = (uintptr*)L1;
-
- while(pa < pe){
- if(l1[PTL1X(va, 1)] == 0 && (pe-pa) >= PGLSZ(1) && ((va|pa) & PGLSZ(1)-1) == 0){
- l1[PTL1X(va, 1)] = PTEVALID | PTEBLOCK | pa | attr;
- va += PGLSZ(1);
- pa += PGLSZ(1);
- continue;
- }
- if(l1[PTL1X(va, 1)] & PTEVALID) {
- assert((l1[PTL1X(va, 1)] & PTETABLE) == PTETABLE);
- l0 = KADDR(l1[PTL1X(va, 1)] & -PGLSZ(0));
- } else {
- l0 = rampage();
- memset(l0, 0, BY2PG);
- l1[PTL1X(va, 1)] = PTEVALID | PTETABLE | PADDR(l0);
- }
- assert(l0[PTLX(va, 0)] == 0);
- l0[PTLX(va, 0)] = PTEVALID | PTEPAGE | pa | attr;
- va += BY2PG;
- pa += BY2PG;
- }
-}
-
-static void
-kmapram(uintptr base, uintptr limit)
-{
- if(base < (uintptr)-KZERO && limit > (uintptr)-KZERO){
- kmapram(base, (uintptr)-KZERO);
- kmapram((uintptr)-KZERO, limit);
- return;
- }
- if(base < INITMAP)
- base = INITMAP;
- if(base >= limit || limit <= INITMAP)
- return;
-
- l1map((uintptr)kmapaddr(base), base, limit,
- PTEWRITE | PTEPXN | PTEUXN | PTESH(SHARE_INNER));
-}
-
-void
-meminit(void)
-{
- /* DDR Memory (All modules) */
- conf.mem[0].base = PGROUND((uintptr)end - KZERO);
-
- /* exclude uncached dram for ucalloc() */
- conf.mem[0].limit = UCRAMBASE;
- conf.mem[1].base = UCRAMBASE+UCRAMSIZE;
-
- conf.mem[1].limit = 0x100000000ULL;
-
- /* DDR Memory (Quad-A53 only) */
- conf.mem[2].base = 0x100000000ULL;
- conf.mem[2].limit = 0x140000000ULL;
-
- kmapram(conf.mem[0].base, conf.mem[0].limit);
- kmapram(conf.mem[1].base, conf.mem[1].limit);
- kmapram(conf.mem[2].base, conf.mem[2].limit);
-
- conf.mem[0].npage = (conf.mem[0].limit - conf.mem[0].base)/BY2PG;
- conf.mem[1].npage = (conf.mem[1].limit - conf.mem[1].base)/BY2PG;
- conf.mem[2].npage = (conf.mem[2].limit - conf.mem[2].base)/BY2PG;
-}
-
-uintptr
-mmukmap(uintptr va, uintptr pa, usize size)
-{
- uintptr attr, off;
-
- if(va == 0)
- return 0;
-
- off = pa & BY2PG-1;
-
- attr = va & PTEMA(7);
- attr |= PTEWRITE | PTEUXN | PTEPXN | PTESH(SHARE_OUTER);
-
- va &= -BY2PG;
- pa &= -BY2PG;
-
- l1map(va, pa, pa + off + size, attr);
- flushtlb();
-
- return va + off;
-}
-
-void*
-vmap(uvlong pa, vlong size)
-{
- static uintptr base = VMAP;
- uvlong pe = pa + size;
- uintptr va;
-
- va = base;
- base += PGROUND(pe) - (pa & -BY2PG);
-
- return (void*)mmukmap(va | PTEDEVICE, pa, size);
-}
-
-void
-vunmap(void *, vlong)
-{
-}
-
-static uintptr*
-mmuwalk(uintptr va, int level)
-{
- uintptr *table, pte;
- Page *pg;
- int i, x;
-
- x = PTLX(va, PTLEVELS-1);
- table = m->mmutop;
- for(i = PTLEVELS-2; i >= level; i--){
- pte = table[x];
- if(pte & PTEVALID) {
- if(pte & (0xFFFFULL<<48))
- iprint("strange pte %#p va %#p\n", pte, va);
- pte &= ~(0xFFFFULL<<48 | BY2PG-1);
- } else {
- pg = up->mmufree;
- if(pg == nil)
- return nil;
- up->mmufree = pg->next;
- pg->va = va & -PGLSZ(i+1);
- if((pg->next = up->mmuhead[i+1]) == nil)
- up->mmutail[i+1] = pg;
- up->mmuhead[i+1] = pg;
- pte = pg->pa;
- memset(kmapaddr(pte), 0, BY2PG);
- coherence();
- table[x] = pte | PTEVALID | PTETABLE;
- }
- table = kmapaddr(pte);
- x = PTLX(va, (uintptr)i);
- }
- return &table[x];
-}
-
-static Proc *asidlist[256];
-
-static int
-allocasid(Proc *p)
-{
- static Lock lk;
- Proc *x;
- int a;
-
- lock(&lk);
- a = p->asid;
- if(a < 0)
- a = -a;
- if(a == 0)
- a = p->pid;
- for(;; a++){
- a %= nelem(asidlist);
- if(a == 0)
- continue; // reserved
- x = asidlist[a];
- if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
- break;
- }
- p->asid = a;
- asidlist[a] = p;
- unlock(&lk);
-
- return x != p;
-}
-
-static void
-freeasid(Proc *p)
-{
- int a;
-
- a = p->asid;
- if(a < 0)
- a = -a;
- if(a > 0 && asidlist[a] == p)
- asidlist[a] = nil;
- p->asid = 0;
-}
-
-void
-putasid(Proc *p)
-{
- /*
- * Prevent the following scenario:
- * pX sleeps on cpuA, leaving its page tables in mmutop
- * pX wakes up on cpuB, and exits, freeing its page tables
- * pY on cpuB allocates a freed page table page and overwrites with data
- * cpuA takes an interrupt, and is now running with bad page tables
- * In theory this shouldn't hurt because only user address space tables
- * are affected, and mmuswitch will clear mmutop before a user process is
- * dispatched. But empirically it correlates with weird problems, eg
- * resetting of the core clock at 0x4000001C which confuses local timers.
- */
- if(conf.nmach > 1)
- mmuswitch(nil);
-
- if(p->asid > 0)
- p->asid = -p->asid;
-}
-
-void
-putmmu(uintptr va, uintptr pa, Page *pg)
-{
- uintptr *pte, old;
- int s;
-
- s = splhi();
- while((pte = mmuwalk(va, 0)) == nil){
- spllo();
- up->mmufree = newpage(0, nil, 0);
- splhi();
- }
- old = *pte;
- *pte = 0;
- if((old & PTEVALID) != 0)
- flushasidvall((uvlong)up->asid<<48 | va>>12);
- else
- flushasidva((uvlong)up->asid<<48 | va>>12);
- *pte = pa | PTEPAGE | PTEUSER | PTEPXN | PTENG | PTEAF |
- (((pa & PTEMA(7)) == PTECACHED)? PTESH(SHARE_INNER): PTESH(SHARE_OUTER));
- if(needtxtflush(pg)){
- cachedwbinvse(kmap(pg), BY2PG);
- cacheiinvse((void*)va, BY2PG);
- donetxtflush(pg);
- }
- splx(s);
-}
-
-static void
-mmufree(Proc *p)
-{
- int i;
-
- freeasid(p);
-
- for(i=1; i<PTLEVELS; i++){
- if(p->mmuhead[i] == nil)
- break;
- p->mmutail[i]->next = p->mmufree;
- p->mmufree = p->mmuhead[i];
- p->mmuhead[i] = p->mmutail[i] = nil;
- }
-}
-
-void
-mmuswitch(Proc *p)
-{
- uintptr va;
- Page *t;
-
- for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
- m->mmutop[PTLX(va, PTLEVELS-1)] = 0;
-
- if(p == nil){
- setttbr(PADDR(m->mmutop));
- return;
- }
-
- if(p->newtlb){
- mmufree(p);
- p->newtlb = 0;
- }
-
- if(allocasid(p))
- flushasid((uvlong)p->asid<<48);
-
- setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
-
- for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
- va = t->va;
- m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
- }
-}
-
-void
-mmurelease(Proc *p)
-{
- mmuswitch(nil);
- mmufree(p);
- freepages(p->mmufree, nil, 0);
- p->mmufree = nil;
-}
-
-void
-flushmmu(void)
-{
- int x;
-
- x = splhi();
- up->newtlb = 1;
- mmuswitch(up);
- splx(x);
-}
-
-void
-checkmmu(uintptr, uintptr)
-{
-}
-
-static void*
-ucramalloc(usize size, uintptr align, uint attr)
-{
- static uintptr top = UCRAMBASE + UCRAMSIZE;
- static Lock lk;
- uintptr va, pg;
-
- lock(&lk);
- top -= size;
- size += top & align-1;
- top &= -align;
- if(top < UCRAMBASE)
- panic("ucramalloc: need %zd bytes", size);
- va = KZERO + top;
- pg = va & -BY2PG;
- if(pg != ((va+size) & -BY2PG))
- mmukmap(pg | attr, pg - KZERO, PGROUND(size));
- unlock(&lk);
-
- return (void*)va;
-}
-
-void*
-ucalloc(usize size)
-{
- return ucramalloc(size, 8, PTEUNCACHED);
-}