shithub: front

Download patch

ref: 47f2839fc9fb3a0fa227bc4a8781791cfde48bc6
parent: 7c12645aba1958a4ece7799c262a835a7fc3e3bb
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Sun Jul 13 08:43:26 EDT 2025

kernel: cap the amount of work to reclaim pages from hash chain

When looking for free pages in an Image,
we want to skip over recently used pages
as they're at the head of hash chains.

However, if the cache is very full, this
takes too long. So cap how deep we walk
into each chain and just take what we have.

--- a/sys/src/9/port/page.c
+++ b/sys/src/9/port/page.c
@@ -113,6 +113,7 @@
 	Page **h, **l, **x, *p;
 	Page *fh, *ft;
 	ulong np;
+	int c;
 
 	lock(i);
 	if(i->pgref == 0){
@@ -124,9 +125,15 @@
 	for(h = i->pghash; h < &i->pghash[PGHSIZE]; h++){
 		l = h;
 		x = nil;
+		c = 1;
 		for(p = *l; p != nil; p = p->next){
-			if(p->ref == 0)
+			if(p->ref == 0){
 				x = l;
+				/* too many collisions, take what we have */
+				if(c >= 64)
+					break;
+			}
+			c++;
 			l = &p->next;
 		}
 		if(x == nil)
--