ref: c2ec061689ec8825c9af36944738f951858d5e70
parent: 9c2e8e2b13b0d01b7adf88b61af6edfbddd872c1
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Wed Jan 17 14:48:39 EST 2024
pc64: set accessed and dirty bits in PTE to avoid write-back

We do not use the accessed and dirty bits in page table entries, so we can always set them up front. This avoids the CPU having to atomically write the bits back into the page table entry when it would otherwise set them on first access or first store.
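The idea, sketched below in C for illustration only (the mkpte helper and the standalone defines are hypothetical and not part of this change; the bit positions mirror the x86-64 PTE layout used in mem.h): a mapping is installed with the Accessed and Dirty bits already set, so the MMU never performs a locked read-modify-write on the PTE to record them.

/*
 * Illustrative sketch, not kernel code: build a PTE with the
 * Accessed and Dirty bits pre-set. The bit values mirror the
 * x86-64 hardware layout (P=bit 0, R/W=bit 1, A=bit 5, D=bit 6).
 */
typedef unsigned long long uvlong;

#define PTEVALID	(1ull<<0)	/* present */
#define PTEWRITE	(1ull<<1)	/* writable */
#define PTEACCESSED	(1ull<<5)	/* A bit */
#define PTEDIRTY	(1ull<<6)	/* D bit */

static uvlong
mkpte(uvlong pa, uvlong flags)
{
	/* always set A and D up front; the kernel never inspects them */
	return pa | flags | PTEACCESSED | PTEDIRTY | PTEVALID;
}

This is only safe because the kernel does not consult the A/D bits (for example for page aging or dirty tracking); a kernel that relied on them would lose that information by pre-setting them.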
--- a/sys/src/9/pc64/l.s
+++ b/sys/src/9/pc64/l.s
@@ -124,7 +124,7 @@
MOVL SI, AX /* PML4 */
MOVL AX, DX
- ADDL $(PTSZ|PTEWRITE|PTEVALID), DX /* PDP at PML4 + PTSZ */
+ ADDL $(PTEACCESSED|PTEDIRTY|PTSZ|PTEWRITE|PTEVALID), DX /* PDP at PML4 + PTSZ */
MOVL DX, PML4O(0)(AX) /* PML4E for double-map */
MOVL DX, PML4O(KZERO)(AX) /* PML4E for KZERO */
--- a/sys/src/9/pc64/mem.h
+++ b/sys/src/9/pc64/mem.h
@@ -156,6 +156,8 @@
#define PTERONLY (0ull<<1)
#define PTEKERNEL (0ull<<2)
#define PTEUSER (1ull<<2)
+#define PTEACCESSED (1ull<<5)
+#define PTEDIRTY (1ull<<6)
#define PTESIZE (1ull<<7)
#define PTEGLOBAL (1ull<<8)
#define PTENOEXEC ((uvlong)m->havenx<<63)
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -341,6 +341,7 @@
flags = pa;
pa = PPN(pa);
flags -= pa;
+ flags |= PTEACCESSED|PTEDIRTY;
if(va >= KZERO)
flags |= PTEGLOBAL;
while(size > 0){
@@ -504,7 +505,7 @@
if(pte == 0)
panic("putmmu: bug: va=%#p pa=%#p", va, pa);
old = *pte;
- *pte = pa | PTEUSER;
+ *pte = pa | PTEACCESSED|PTEDIRTY|PTEUSER;
splx(x);
if(old & PTEVALID)
invlpg(va);
@@ -553,7 +554,7 @@
pte = mmuwalk(m->pml4, va, 0, 1);
if(pte == 0 || (*pte & PTEVALID) != 0)
panic("kmap: pa=%#p va=%#p", pa, va);
- *pte = pa | PTEWRITE|PTENOEXEC|PTEVALID;
+ *pte = pa | PTEACCESSED|PTEDIRTY|PTEWRITE|PTENOEXEC|PTEVALID;
splx(x);
invlpg(va);
return (KMap*)va;