ref: 83d8a24215ddf66ee64fc4704151571b2e952685
parent: b5362dc72220a4ac80678cc00e4289befae337e3
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Mon Jan 29 03:26:42 EST 2018
pc64: fix kmap() and invlpg()

Flushing the TLB once the kmap index wraps around is not enough, as
in-use PTEs can be speculatively loaded. So instead use invlpg() to
explicitly invalidate the TLB entry of the page just mapped. This fixes
wired mount cache corruption for reads approaching 2MB, which is the
size of the KMAP window.

invlpg() was broken, using the wrong operand type.
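
For context, a minimal sketch of kmap() as it reads with this change
applied. The parts outside the hunk below (the Page argument, the
cankaddr() fast path, and the pc64 definitions of KMAP, KMAPSIZE,
mmuwalk(), splhi()/splx()) are reconstructed assumptions, not copied
verbatim from the tree:

	KMap*
	kmap(Page *page)
	{
		uintptr *pte, pa, va;
		int x;

		pa = page->pa;
		if(cankaddr(pa) != 0)
			return (KMap*)KADDR(pa);

		x = splhi();
		/* next slot in the 2MB KMAP window; the mask wraps the index */
		va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
		pte = mmuwalk(m->pml4, va, 0, 1);
		if(pte == 0 || (*pte & PTEVALID) != 0)
			panic("kmap: pa=%#p va=%#p", pa, va);
		*pte = pa | PTEWRITE|PTEVALID;
		splx(x);
		/*
		 * invalidate the TLB entry for just this page instead of
		 * flushing the whole TLB on index wrap; a stale entry may
		 * have been loaded speculatively while the slot was in use
		 */
		invlpg(va);
		return (KMap*)va;
	}
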
--- a/sys/src/9/pc64/l.s
+++ b/sys/src/9/pc64/l.s
@@ -449,11 +449,8 @@
MOVQ BP, AX /* BP set to -1 if traped */
RET
-TEXT invlpg(SB), 1, $-4 /* INVLPG va+0(FP) */
- MOVQ RARG, va+0(FP)
-
- INVLPG va+0(FP)
-
+TEXT invlpg(SB), 1, $-4
+ INVLPG (RARG)
RET
TEXT wbinvd(SB), 1, $-4
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -485,15 +485,13 @@
return (KMap*)KADDR(pa);
x = splhi();
- va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
+ va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
pte = mmuwalk(m->pml4, va, 0, 1);
- if(pte == 0 || *pte & PTEVALID)
+ if(pte == 0 || (*pte & PTEVALID) != 0)
panic("kmap: pa=%#p va=%#p", pa, va);
*pte = pa | PTEWRITE|PTEVALID;
- up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
- if(up->kmapindex == 0)
- mmuflushtlb();
splx(x);
+ invlpg(va);
return (KMap*)va;
}
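
Note on the l.s change: INVLPG invalidates the TLB entry for the linear
address of its memory operand. The old sequence copied RARG into the
stack slot va+0(FP) and then applied INVLPG to that slot, so it
invalidated the mapping of the stack location rather than of the page
passed in; INVLPG (RARG) uses the caller's address itself. A sketch of
the C side (the exact prototype in the pc64 headers is assumed, not
copied):

	/* assumed prototype; the real declaration lives in pc64/fns.h */
	void	invlpg(uintptr va);

	/* caller side, as in kmap() above: drop the stale TLB entry for one page */
	invlpg(va);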