From 83d8a24215ddf66ee64fc4704151571b2e952685 Mon Sep 17 00:00:00 2001
From: cinap_lenrek
Date: Mon, 29 Jan 2018 08:26:42 +0100
Subject: pc64: fix kmap() and invlpg()

flushing the tlb once the index wraps around is not enough, as
in-use ptes can be speculatively loaded. so instead use invlpg()
and explicitly invalidate the tlb entry of the mapped page.

this fixes wired mount cache corruption for reads approaching 2MB,
which is the size of the KMAP window.

invlpg() was broken, using the wrong operand type.
---
 sys/src/9/pc64/l.s   | 7 ++-----
 sys/src/9/pc64/mmu.c | 8 +++-----
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/sys/src/9/pc64/l.s b/sys/src/9/pc64/l.s
index a1cc976f3..61276f0e7 100644
--- a/sys/src/9/pc64/l.s
+++ b/sys/src/9/pc64/l.s
@@ -449,11 +449,8 @@ TEXT _wrmsrinst(SB), $0
 	MOVQ	BP, AX		/* BP set to -1 if traped */
 	RET
 
-TEXT invlpg(SB), 1, $-4		/* INVLPG va+0(FP) */
-	MOVQ	RARG, va+0(FP)
-
-	INVLPG	va+0(FP)
-
+TEXT invlpg(SB), 1, $-4
+	INVLPG	(RARG)
 	RET
 
 TEXT wbinvd(SB), 1, $-4
diff --git a/sys/src/9/pc64/mmu.c b/sys/src/9/pc64/mmu.c
index 6f27ac25e..02f96d55a 100644
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -485,15 +485,13 @@ kmap(Page *page)
 		return (KMap*)KADDR(pa);
 
 	x = splhi();
-	va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
+	va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
 	pte = mmuwalk(m->pml4, va, 0, 1);
-	if(pte == 0 || *pte & PTEVALID)
+	if(pte == 0 || (*pte & PTEVALID) != 0)
 		panic("kmap: pa=%#p va=%#p", pa, va);
 	*pte = pa | PTEWRITE|PTEVALID;
-	up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
-	if(up->kmapindex == 0)
-		mmuflushtlb();
 	splx(x);
+	invlpg(va);
 	return (KMap*)va;
 }
 
-- 
cgit v1.2.3
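
For reference, here is kmap() as it reads with this patch applied. Only the lines inside the hunk come from the diff; the function head (return type, local declarations, and the direct-map fast path ending in the KADDR() return) is a hedged sketch filled in from the hunk's context lines and usual Plan 9 kernel style, not taken from this patch.

	KMap*
	kmap(Page *page)
	{
		uintptr *pte, pa, va;	/* assumed declarations, not part of the diff */
		int x;

		pa = page->pa;
		if(cankaddr(pa) != 0)	/* assumed fast path: pa reachable via the direct map */
			return (KMap*)KADDR(pa);

		x = splhi();
		/* take the next slot; the mask keeps va inside the 2MB KMAP window */
		va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
		pte = mmuwalk(m->pml4, va, 0, 1);
		if(pte == 0 || (*pte & PTEVALID) != 0)
			panic("kmap: pa=%#p va=%#p", pa, va);
		*pte = pa | PTEWRITE|PTEVALID;
		splx(x);
		/* invalidate the stale translation for just this page: a single
		 * mmuflushtlb() at wrap-around is not enough, because a pte that
		 * is still in use can be speculatively reloaded into the tlb */
		invlpg(va);
		return (KMap*)va;
	}

The l.s change makes invlpg() issue INVLPG on the address passed in RARG itself; the old code first spilled RARG to va+0(FP) and then invalidated the page of that stack slot rather than the page at va, which is the wrong operand type the commit message refers to.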