ref: d9fa8fc465f9a4397db69ebc51a452f5557750e2
parent: bb5c5c1c35082bd1521ce1bd64db3c9f81468fb5
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Sat Aug 16 10:19:40 EDT 2025
kernel: change newpage() prototype

The "clear" argument was almost never used, and with the new fillpage() function the caller can clear the page just as easily. Put the virtual address argument "va" first, as it is the most important one. Remove the Segment** pointer and instead pass a QLock* as the last argument, signaling that we hold a lock which newpage() should release before sleeping. When that happens, newpage() now just returns nil to signal that we slept (and lost the lock).
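For illustration, a minimal sketch of the two calling patterns under the new prototype (not part of the patch; the names va, addr, s, pg and new simply follow the hunks below):

	/*
	 * Illustrative only.  No lock held: newpage() may sleep for
	 * memory but always returns a page; callers that relied on the
	 * old "clear" flag now zero the page themselves with fillpage().
	 */
	pg = fillpage(newpage(va, nil), 0);

	/*
	 * Illustrative only.  Segment qlocked by the caller and passed
	 * as the QLock* (it converts through its unnamed QLock member,
	 * which is what the fault.c hunk below relies on): newpage()
	 * unlocks it before sleeping and returns nil in that case, so
	 * the fault handler backs out and retries once the locks have
	 * been reacquired.
	 */
	new = newpage(addr, s);
	if(new == nil)
		return -1;
	*pg = fillpage(new, 0);
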
--- a/sys/src/9/arm64/mmu.c
+++ b/sys/src/9/arm64/mmu.c
@@ -283,7 +283,7 @@
s = splhi();
while((pte = mmuwalk(va, 0)) == nil){
spllo();
- up->mmufree = newpage(0, nil, 0);
+ up->mmufree = newpage(0, nil);
splhi();
}
old = *pte;
--- a/sys/src/9/bcm/mmu.c
+++ b/sys/src/9/bcm/mmu.c
@@ -206,11 +206,12 @@
/* l2 pages only have 256 entries - wastes 3K per 1M of address space */
if(up->mmul2cache == nil){
spllo();
- pg = newpage(1, 0, 0);
+ pg = newpage(0, nil);
splhi();
/* if newpage slept, we might be on a different cpu */
l1 = &m->mmul1[x];
pg->va = VA(kmap(pg));
+ memset((void*)pg->va, 0, BY2PG);
}else{
pg = up->mmul2cache;
up->mmul2cache = pg->next;
--- a/sys/src/9/cycv/mmu.c
+++ b/sys/src/9/cycv/mmu.c
@@ -145,7 +145,7 @@
if(p != nil)
up->mmufree = p->next;
else
- p = newpage(0, 0, 0);
+ p = newpage(0, nil);
p->daddr = L1RX(va);
p->next = up->mmuused;
up->mmuused = p;
@@ -272,7 +272,7 @@
if((*e & 3) == 0){
if(up->kmaptable != nil)
panic("kmaptable != nil");
- up->kmaptable = newpage(0, 0, 0);
+ up->kmaptable = newpage(0, nil);
s = splhi();
v = tmpmap(up->kmaptable->pa);
memset(v, 0, BY2PG);
--- a/sys/src/9/kw/mmu.c
+++ b/sys/src/9/kw/mmu.c
@@ -287,14 +287,15 @@
/* wasteful - l2 pages only have 256 entries - fix */
if(up->mmul2cache == nil){
/* auxpg since we don't need much? memset if so */
- pg = newpage(1, 0, 0);
+ pg = newpage(0, nil);
pg->va = VA(kmap(pg));
}
else{
pg = up->mmul2cache;
up->mmul2cache = pg->next;
- memset((void*)pg->va, 0, BY2PG);
}
+ memset((void*)pg->va, 0, BY2PG);
+
pg->daddr = x;
pg->next = up->mmul2;
up->mmul2 = pg;
--- a/sys/src/9/omap/mmu.c
+++ b/sys/src/9/omap/mmu.c
@@ -268,14 +268,15 @@
/* wasteful - l2 pages only have 256 entries - fix */
if(up->mmul2cache == nil){
/* auxpg since we don't need much? memset if so */
- pg = newpage(1, 0, 0);
+ pg = newpage(0, nil);
pg->va = VA(kmap(pg));
}
else{
pg = up->mmul2cache;
up->mmul2cache = pg->next;
- memset((void*)pg->va, 0, BY2PG);
}
+ memset((void*)pg->va, 0, BY2PG);
+
pg->daddr = x;
pg->next = up->mmul2;
up->mmul2 = pg;
--- a/sys/src/9/pc/mmu.c
+++ b/sys/src/9/pc/mmu.c
@@ -206,7 +206,7 @@
m->pdballoc++;
if(m->pdbpool == 0){
spllo();
- page = newpage(0, 0, 0);
+ page = newpage(0, nil);
page->va = (ulong)vpd;
splhi();
pdb = tmpmap(page);
@@ -432,7 +432,7 @@
if(!(vpd[PDX(va)]&PTEVALID)){
if(up->mmufree == 0){
spllo();
- page = newpage(0, 0, 0);
+ page = newpage(0, nil);
splhi();
}
else{
@@ -808,7 +808,7 @@
if(up->kmaptable != nil)
panic("kmaptable");
spllo();
- up->kmaptable = newpage(0, 0, 0);
+ up->kmaptable = newpage(0, nil);
splhi();
vpd[PDX(KMAP)] = up->kmaptable->pa|PTEWRITE|PTEVALID;
flushpg((ulong)kpt);
--- a/sys/src/9/pc/vgai81x.c
+++ b/sys/src/9/pc/vgai81x.c
@@ -192,7 +192,7 @@
if(scr->storage == 0){
CursorI81x *hwcurs;
- Page *pg = newpage(0, nil, 0);
+ Page *pg = newpage(0, nil);
scr->storage = (uintptr)vmap(pg->pa, BY2PG);
if(scr->storage == 0){
putpage(pg);
--- a/sys/src/9/port/cache.c
+++ b/sys/src/9/port/cache.c
@@ -426,7 +426,7 @@
}
if(fscache->pgref > TOTALPAGES)
pagereclaim(fscache);
- p = newpage(0, nil, pn*BY2PG);
+ p = newpage(pn*BY2PG, nil);
p->daddr = cacheaddr(m, pn);
cachedel(fscache, p->daddr);
cachepage(p, fscache);
--- a/sys/src/9/port/devsegment.c
+++ b/sys/src/9/port/devsegment.c
@@ -371,7 +371,7 @@
nexterror();
}
for(; va < s->top; va += BY2PG)
- segpage(s, newpage(1, nil, va));
+ segpage(s, fillpage(newpage(va, nil), 0));
poperror();
g->s = s;
} else
@@ -522,13 +522,12 @@
p->va = va;
va += BY2PG;
p->modref = 0;
- fillpage(p, 0);
if(waserror()){
while(++p <= l)
freepages(p, p, 1);
nexterror();
}
- segpage(s, p);
+ segpage(s, fillpage(p, 0));
poperror();
} while(p != l);
--- a/sys/src/9/port/fault.c
+++ b/sys/src/9/port/fault.c
@@ -99,7 +99,7 @@
putpage(new);
continue;
}
- new = newpage(0, nil, paddr + o);
+ new = newpage(paddr + o, nil);
new->daddr = daddr + o;
k = kmap(new);
n = ask - o;
@@ -177,11 +177,10 @@
case SG_SHARED: /* fill on demand */
case SG_STACK:
if(*pg == nil) {
- new = newpage(0, &s, addr);
- if(s == nil)
+ new = newpage(addr, s);
+ if(new == nil)
return -1;
- fillpage(new, (s->type&SG_TYPE)==SG_STACK? 0xfe: 0);
- *pg = new;
+ *pg = fillpage(new, (s->type&SG_TYPE)==SG_STACK? 0xfe: 0);
s->used++;
}
/* wet floor */
@@ -203,8 +202,8 @@
if(swapimage != nil && old->image == swapimage && (old->ref + swapcount(old->daddr)) == 1)
uncachepage(old);
if(old->ref > 1 || old->image != nil) {
- new = newpage(0, &s, addr);
- if(s == nil)
+ new = newpage(addr, s);
+ if(new == nil)
return -1;
copypage(old, new);
settxtflush(new, s->flushme);
--- a/sys/src/9/port/page.c
+++ b/sys/src/9/port/page.c
@@ -169,7 +169,7 @@
}
Page*
-newpage(int clear, Segment **s, uintptr va)
+newpage(uintptr va, QLock *locked)
{
Page *p, **l;
int color;
@@ -177,8 +177,8 @@
lock(&palloc);
while(!ispages(nil)){
unlock(&palloc);
- if(s != nil)
- qunlock(*s);
+ if(locked)
+ qunlock(locked);
if(!waserror()){
Rendezq *q;
@@ -195,15 +195,14 @@
}
/*
- * If called from fault and we lost the segment from
+ * If called from fault and we lost the lock from
* underneath don't waste time allocating and freeing
* a page. Fault will call newpage again when it has
- * reacquired the segment locks
+ * reacquired the locks
*/
- if(s != nil){
- *s = nil;
+ if(locked)
return nil;
- }
+
lock(&palloc);
}
@@ -230,9 +229,6 @@
p->va = va;
p->modref = 0;
inittxtflush(p);
-
- if(clear)
- fillpage(p, 0);
return p;
}
--- a/sys/src/9/port/portfns.h
+++ b/sys/src/9/port/portfns.h
@@ -222,7 +222,7 @@
Mhead* newmhead(Chan*);
Mount* newmount(Chan*, int, char*);
Image* newimage(ulong);
-Page* newpage(int, Segment **, uintptr);
+Page* newpage(uintptr, QLock*);
Path* newpath(char*);
Pgrp* newpgrp(void);
Rgrp* newrgrp(void);
--- a/sys/src/9/port/userinit.c
+++ b/sys/src/9/port/userinit.c
@@ -51,9 +51,10 @@
up->seg[SSEG] = newseg(SG_STACK | SG_NOEXEC, USTKTOP-USTKSIZE, USTKSIZE / BY2PG);
up->seg[TSEG] = newseg(SG_TEXT | SG_RONLY, UTZERO, 1);
up->seg[TSEG]->flushme = 1;
- p = newpage(1, nil, UTZERO);
+ p = newpage(UTZERO, nil);
k = kmap(p);
- memmove((void*)VA(k), initcode, sizeof(initcode));
+ memmove((uchar*)VA(k), initcode, sizeof(initcode));
+ memset((uchar*)VA(k)+sizeof(initcode), 0, BY2PG-sizeof(initcode));
kunmap(k);
segpage(up->seg[TSEG], p);
--- a/sys/src/9/teg2/mmu.c
+++ b/sys/src/9/teg2/mmu.c
@@ -513,14 +513,15 @@
/* wasteful - l2 pages only have 256 entries - fix */
if(up->mmul2cache == nil){
/* auxpg since we don't need much? memset if so */
- pg = newpage(1, 0, 0);
+ pg = newpage(0, nil);
pg->va = VA(kmap(pg));
}
else{
pg = up->mmul2cache;
up->mmul2cache = pg->next;
- memset((void*)pg->va, 0, BY2PG);
}
+ memset((void*)pg->va, 0, BY2PG);
+
pg->daddr = x;
pg->next = up->mmul2;
up->mmul2 = pg;
--- a/sys/src/9/xen/fns.h
+++ b/sys/src/9/xen/fns.h
@@ -59,7 +59,6 @@
void mmuinit(void);
ulong mmukmap(ulong, ulong, int);
int mmukmapsync(ulong);
-#define mmunewpage(x)
ulong* mmuwalk(ulong*, ulong, int, int);
char* mtrr(uvlong, uvlong, char *);
int mtrrprint(char *, long);
--- a/sys/src/9/xen/mmu.c
+++ b/sys/src/9/xen/mmu.c
@@ -303,7 +303,7 @@
spllo();
badpages = 0;
for (;;) {
- page = newpage(0, 0, 0);
+ page = newpage(0, nil);
page->va = VA(kmap(page));
if(mpdb)
memmove((void*)page->va, mpdb, BY2PG);
@@ -406,7 +406,7 @@
if(up->mmufree == 0){
badpages = 0;
for (;;) {
- page = newpage(1, 0, 0);
+ page = newpage(0, nil);
page->va = VA(kmap(page));
if (xenptpin(page->va))
break;
@@ -423,10 +423,10 @@
else {
page = up->mmufree;
up->mmufree = page->next;
- memset((void*)page->va, 0, BY2PG);
if (!xenptpin(page->va))
panic("xenptpin");
}
+ memset((void*)page->va, 0, BY2PG);
xenupdate(&pdb[pdbx], page->pa|PTEVALID|PTEUSER|PTEWRITE);
--- a/sys/src/9/zynq/mmu.c
+++ b/sys/src/9/zynq/mmu.c
@@ -153,7 +153,7 @@
if(p != nil)
up->mmufree = p->next;
else
- p = newpage(0, 0, 0);
+ p = newpage(0, nil);
p->daddr = L1RX(va);
p->next = up->mmuused;
up->mmuused = p;
@@ -281,7 +281,7 @@
if((*e & 3) == 0){
if(up->kmaptable != nil)
panic("kmaptable != nil");
- up->kmaptable = newpage(0, 0, 0);
+ up->kmaptable = newpage(0, nil);
s = splhi();
v = tmpmap(up->kmaptable->pa);
memset(v, 0, BY2PG);
--