ref: 222525f867c32c47154574c437da6d36998fd81c
parent: 27118028659fce0c30c3eeb88a5cd1264585cbe6
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Sun Feb 2 13:01:13 EST 2014
pc64: track per process kmap page tables in a separate MMU list. we have to keep kmap page tables in their own list because user tables are subject to (virtual) tlb flushing. kmap page tables are never freed except in mmurelease(), where we link the kmap mmu list in front of the user mmus and call mmufree(), which frees all the mmus of the process.
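as a rough illustration of the splice-then-free pattern described above, here is a minimal user-space sketch, not the kernel code: MMU and PMMU are reduced stand-ins, and splicekmap()/freemmus() are made-up names for this sketch. it shows the idea of linking the kmap list in front of the user mmu list so that one walk over the combined list frees everything.

#include <stdio.h>
#include <stdlib.h>

/* user-space sketch: simplified stand-ins for the kernel structures */
typedef struct MMU MMU;
struct MMU {
	MMU	*next;
};

typedef struct {
	MMU	*mmuhead, *mmutail;	/* user page table pages, flushed on newtlb */
	MMU	*kmaphead, *kmaptail;	/* kmap page table pages, kept until exit */
	int	mmucount, kmapcount;
} PMMU;

/* link the kmap mmu list in front of the user mmus (what mmurelease() does) */
static void
splicekmap(PMMU *pm)
{
	MMU *p;

	if((p = pm->kmaptail) == NULL)
		return;
	if((p->next = pm->mmuhead) == NULL)
		pm->mmutail = p;
	pm->mmuhead = pm->kmaphead;
	pm->mmucount += pm->kmapcount;
	pm->kmaphead = pm->kmaptail = NULL;
	pm->kmapcount = 0;
}

/* free every entry on the (now combined) list, in the spirit of mmufree() */
static void
freemmus(PMMU *pm)
{
	MMU *p, *next;

	for(p = pm->mmuhead; p != NULL; p = next){
		next = p->next;
		free(p);
		pm->mmucount--;
	}
	pm->mmuhead = pm->mmutail = NULL;
}

int
main(void)
{
	PMMU pm = {0};
	int i;

	/* build a fake kmap list of three page table pages */
	for(i = 0; i < 3; i++){
		MMU *p = calloc(1, sizeof *p);
		if(pm.kmaphead == NULL)
			pm.kmaphead = p;
		else
			pm.kmaptail->next = p;
		pm.kmaptail = p;
		pm.kmapcount++;
	}
	splicekmap(&pm);
	freemmus(&pm);
	printf("remaining mmus: %d\n", pm.mmucount);	/* prints 0 */
	return 0;
}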
--- a/sys/src/9/pc64/dat.h
+++ b/sys/src/9/pc64/dat.h
@@ -137,8 +137,11 @@
#define NCOLOR 1
struct PMMU
{
- MMU *mmuhead;
- MMU *mmutail;
+ MMU* mmuhead;
+ MMU* mmutail;
+ MMU* kmaphead;
+ MMU* kmaptail;
+ int kmapcount;
int mmucount;
};
--- a/sys/src/9/pc64/mem.h
+++ b/sys/src/9/pc64/mem.h
@@ -57,8 +57,8 @@
#define VMAP (0xffffffff00000000ull) /* 2GB identity map of upper 2GB ram */
#define VMAPSIZE (2*GiB)
-#define KMAP (0xffffff7f00000000ull)
-#define KMAPSIZE (512*GiB)
+#define KMAP (0xffffff7f00000000ull) /* 2MB for per process temporary kernel mappings */
+#define KMAPSIZE (2*MiB)
/*
* Fundamental addresses - bottom 64kB saved for return to real mode
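for context, a quick back-of-the-envelope check (assuming the usual x86-64 long mode layout of 4KiB pages and 512 entries per page table page) of why 2MiB is a natural size for the per process kmap window: it is exactly the span one page table page maps, so the kmap area never needs more than one PT, one PD and one PDP page per process.

#include <stdio.h>

/* assumed constants: 4KiB pages (BY2PG), 512 PTEs per page table page */
int
main(void)
{
	unsigned long long by2pg = 4096ULL;
	unsigned long long ptes = 512ULL;

	/* one page table page maps 512 * 4KiB = 2MiB of virtual space */
	printf("one PT maps %llu KiB\n", by2pg*ptes/1024);	/* prints 2048 */
	return 0;
}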
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -227,25 +227,36 @@
return 0;
pte = PTEWRITE|PTEVALID;
if(va < VMAP){
- if(va < TSTKTOP)
+ if(va < TSTKTOP){
pte |= PTEUSER;
- p = mmualloc();
- p->index = x;
- p->level = i;
- if(i == PML4E){
- /* PML4 entries linked to head */
- p->next = up->mmuhead;
- if(p->next == nil)
- up->mmutail = p;
- up->mmuhead = p;
- if(p->index <= PTLX(TSTKTOP, 3))
+
+ p = mmualloc();
+ p->index = x;
+ p->level = i;
+ if(i == PML4E){
+ if((p->next = up->mmuhead) == nil)
+ up->mmutail = p;
+ up->mmuhead = p;
m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
- } else {
- /* PDP and PD entries linked to tail */
- up->mmutail->next = p;
- up->mmutail = p;
- }
- up->mmucount++;
+ } else {
+ up->mmutail->next = p;
+ up->mmutail = p;
+ }
+ up->mmucount++;
+ } else if(va >= KMAP && va < (KMAP+KMAPSIZE)){
+ p = mmualloc();
+ p->index = x;
+ p->level = i;
+ if(i == PML4E){
+ up->kmaptail = p;
+ up->kmaphead = p;
+ } else {
+ up->kmaptail->next = p;
+ up->kmaptail = p;
+ }
+ up->kmapcount++;
+ } else
+ return 0;
page = p->page;
} else if(didmmuinit) {page = mallocalign(PTSZ, BY2PG, 0, 0);
@@ -375,7 +386,6 @@
void
mmuswitch(Proc *proc)
{
- uintptr pte;
MMU *p;
mmuzap();
@@ -383,13 +393,11 @@
mmufree(proc);
proc->newtlb = 0;
}
- for(p = proc->mmuhead; p && p->level==PML4E; p = p->next){
- pte = PADDR(p->page) | PTEWRITE|PTEVALID;
- if(p->index <= PTLX(TSTKTOP, 3)){
- m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
- pte |= PTEUSER;
- }
- m->pml4[p->index] = pte;
+ if((p = proc->kmaphead) != nil)
+ m->pml4[PTLX(KMAP, 3)] = PADDR(p->page) | PTEWRITE|PTEVALID;
+ for(p = proc->mmuhead; p != nil && p->level == PML4E; p = p->next){
+ m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
+ m->pml4[p->index] = PADDR(p->page) | PTEUSER|PTEWRITE|PTEVALID;
}
taskswitch((uintptr)proc->kstack+KSTACK);
}
@@ -397,7 +405,18 @@
void
mmurelease(Proc *proc)
{
+ MMU *p;
+
mmuzap();
+ if((p = proc->kmaptail) != nil){
+ if((p->next = proc->mmuhead) == nil)
+ proc->mmutail = p;
+ proc->mmuhead = proc->kmaphead;
+ proc->mmucount += proc->kmapcount;
+
+ proc->kmaphead = proc->kmaptail = nil;
+ proc->kmapcount = 0;
+ }
mmufree(proc);
taskswitch((uintptr)m+MACHSIZE);
}
@@ -410,10 +429,8 @@
x = splhi();
pte = mmuwalk(m->pml4, va, 0, 1);
- if(pte == 0){
+ if(pte == 0)
panic("putmmu: bug: va=%#p pa=%#p", va, pa);
- return;
- }
old = *pte;
*pte = pa | PTEVALID|PTEUSER;
splx(x);
--