ref: a19d90dc8c4d48ec613ac3e2e1462b9099c0a857
parent: 1442891a3e9f3047b0fda4fed8afc0de5fc7da1a
author: Ori Bernstein <ori@eigenstate.org>
date: Sun Jul 7 23:23:40 EDT 2024
gefs: fix allocation log compression

We could overwrite the log tail when compressing the allocation log; this would be bad. Stop doing that.
--- a/sys/src/cmd/gefs/blk.c
+++ b/sys/src/cmd/gefs/blk.c
@@ -11,24 +11,25 @@
static vlong blkalloc(int, uint, int);
static void blkdealloc_lk(Arena*, vlong);
static Blk* initblk(Blk*, vlong, vlong, int);
+static void readblk(Blk*, Bptr, int);
int
-checkflag(Blk *b, int f)
+checkflag(Blk *b, int set, int clr)
{
long v;
v = agetl(&b->flag);
- return (v & f) == f;
+ return (v & (set|clr)) == set;
}
void
-setflag(Blk *b, int f)
+setflag(Blk *b, int set, int clr)
{
long ov, nv;
while(1){
ov = agetl(&b->flag);
- nv = ov | f;
+ nv = (ov & ~clr) | set;
if(acasl(&b->flag, ov, nv))
break;
}
@@ -35,39 +36,23 @@
}
void
-clrflag(Blk *b, int f)
-{
- long ov, nv;
-
- while(1){
- ov = agetl(&b->flag);
- nv = ov & ~f;
- if(acasl(&b->flag, ov, nv))
- break;
- }
-}
-
-void
syncblk(Blk *b)
{
- assert(checkflag(b, Bfinal));
+ assert(checkflag(b, Bfinal, 0));
assert(b->bp.addr >= 0);
- clrflag(b, Bdirty);
+ tracex("syncblk", b->bp, b->type, -1);
if(pwrite(fs->fd, b->buf, Blksz, b->bp.addr) == -1)
broke("%B %s: %r", b->bp, Eio);
+ setflag(b, 0, Bdirty);
}
-static Blk*
-readblk(vlong bp, int flg)
+static void
+readblk(Blk *b, Bptr bp, int flg)
{
- vlong off, rem, n;
+ vlong off, xh, ck, rem, n;
char *p;
- Blk *b;
- assert(bp != -1);
- b = cachepluck();
- b->alloced = getcallerpc(&bp);
- off = bp;
+ off = bp.addr;
rem = Blksz;
while(rem != 0){
n = pread(fs->fd, b->buf, rem, off);
@@ -79,9 +64,8 @@
b->cnext = nil;
b->cprev = nil;
b->hnext = nil;
- b->flag = 0;
- b->bp.addr = bp;
+ b->bp.addr = bp.addr;
b->bp.hash = -1;
b->bp.gen = -1;
@@ -127,8 +111,20 @@
b->data = p;
break;
}
+ if(b->type == Tlog || b->type == Tdlist){
+ xh = b->logh;
+ ck = bufhash(b->data, b->logsz);
+ }else{
+ xh = bp.hash;
+ ck = blkhash(b);
+ }
+	if(!(flg&GBnochk) && ck != xh){
+ if(!(flg&GBsoftchk))
+ broke("%s: %ullx %llux != %llux", Ecorrupt, bp.addr, xh, ck);
+ fprint(2, "%s: %ullx %llux != %llux", Ecorrupt, bp.addr, xh, ck);
+ error(Ecorrupt);
+ }
assert(b->magic == Magic);
- return b;
}
static Arena*
@@ -244,13 +240,16 @@
{
Blk *lb;
- lb = a->logbuf[a->lbidx++ % nelem(a->logbuf)];
- if(lb->bp.addr != -1)
- cachedel(lb->bp.addr);
+ lb = a->logbuf[0];
+ if(lb == a->logtl)
+ lb = a->logbuf[1];
+ assert(lb->ref == 1);
+ lb->flag = Bstatic;
initblk(lb, o, -1, Tlog);
- finalize(lb);
- syncblk(lb);
traceb("logblk" , lb->bp);
+ lb->lasthold0 = lb->lasthold;
+ lb = holdblk(lb);
+ lb->lasthold = getcallerpc(&a);
return lb;
}
@@ -264,21 +263,26 @@
logappend(Arena *a, vlong off, vlong len, int op)
{
vlong o, start, end;
- Blk *nl, *lb;
+ Blk *lb;
char *p;
- lb = a->logtl;
assert((off & 0xff) == 0);
assert(op == LogAlloc || op == LogFree || op == LogSync);
if(op != LogSync){
start = a->h0->bp.addr;
end = start + a->size + 2*Blksz;
- assert(lb == nil || lb->type == Tlog);
assert(off >= start);
- assert(off <= end);
+ assert(off < end);
}
- assert(lb == nil || lb->logsz >= 0);
- dprint("logop %d: %llx+%llx@%x\n", op, off, len, lb?lb->logsz:-1);
+ lb = a->logtl;
+ assert(lb->ref > 0);
+ assert(lb->type == Tlog);
+ assert(lb->logsz >= 0);
+ dprint("logop %d: %llx+%llx@%x\n", op, off, len, lb->logsz);
+
+ if(checkflag(lb, 0, Bdirty))
+ setflag(lb, Bdirty, Bfinal);
+
/*
* move to the next block when we have
* too little room in the log:
@@ -287,23 +291,16 @@
* 16 bytes of new log entry allocation
* and chaining.
*/
- if(lb == nil || lb->logsz >= Logspc - Logslop){
+ if(lb->logsz >= Logspc - Logslop){
o = blkalloc_lk(a, 0);
if(o == -1)
error(Efull);
- nl = mklogblk(a, o);
p = lb->data + lb->logsz;
PACK64(p, o|LogAlloc1);
lb->logsz += 8;
- lb->logp = nl->bp;
- finalize(lb);
- syncblk(lb);
- a->logtl = nl;
- a->nlog++;
- lb = nl;
+ lb->logp = (Bptr){o, -1, -1};
+ lb = mklogblk(a, o);
}
-
- setflag(lb, Bdirty);
if(len == Blksz){
if(op == LogAlloc)
op = LogAlloc1;
@@ -318,6 +315,16 @@
PACK64(p+8, len);
lb->logsz += 8;
}
+ if(lb != a->logtl) {
+ finalize(lb);
+ syncblk(lb);
+
+ finalize(a->logtl);
+ syncblk(a->logtl);
+ dropblk(a->logtl);
+ a->logtl = lb;
+ a->nlog++;
+ }
}
void
@@ -331,10 +338,13 @@
dprint("loadlog %B\n", bp);
traceb("loadlog", bp);
+ b = a->logbuf[0];
while(1){
- b = getblk(bp, 0);
+ assert(checkflag(b, Bstatic, Bcached));
+ holdblk(b);
+ readblk(b, bp, 0);
dprint("\tload %B chain %B\n", bp, b->logp);
- /* the hash covers the log and offset */
+ a->nlog++;
for(i = 0; i < b->logsz; i += n){
d = b->data + i;
ent = UNPACK64(d);
@@ -348,7 +358,9 @@
if(gen >= fs->qgen){
if(a->logtl == nil){
b->logsz = i;
- a->logtl = holdblk(b);
+ a->logtl = b;
+ cachedel(b->bp.addr);
+ setflag(b, Bdirty, 0);
return;
}
dropblk(b);
@@ -386,21 +398,24 @@
}
void
-compresslog(Arena *a)
+flushlog(Arena *a)
{
+ if(checkflag(a->logtl, 0, Bdirty|Bstatic))
+ return;
+ finalize(a->logtl);
+ syncblk(a->logtl);
+}
- int i, nr, nblks;
+void
+compresslog(Arena *a)
+{
+ int i, nr, nblks, nlog;
vlong sz, *blks;
- Blk *b, *nb;
+ Blk *b;
Arange *r;
- Bptr hd;
char *p;
- tracem("compresslog");
- if(a->logtl != nil){
- finalize(a->logtl);
- syncblk(a->logtl);
- }
+ flushlog(a);
/*
* Prepare what we're writing back.
* Arenas must be sized so that we can
@@ -409,7 +424,7 @@
*/
sz = 0;
nr = 0;
- a->nlog = 0;
+ nlog = 0;
for(r = (Arange*)avlmin(a->free); r != nil; r = (Arange*)avlnext(r)){
sz += 16;
nr++;
@@ -436,27 +451,18 @@
if(blks[i] == -1)
error(Efull);
}
+
/* fill up the log with the ranges from the tree */
i = 0;
- hd = (Bptr){blks[0], -1, -1};
- b = a->logbuf[a->lbidx++ % nelem(a->logbuf)];
- a->logbuf[a->lbidx % nelem(a->logbuf)]->bp = Zb;
- if(b->bp.addr != -1)
- cachedel(b->bp.addr);
- initblk(b, blks[i++], -1, Tlog);
- finalize(b);
+ b = mklogblk(a, blks[i++]);
for(r = (Arange*)avlmin(a->free); r != nil; r = (Arange*)avlnext(r)){
if(b->logsz >= Logspc - Logslop){
- a->nlog++;
- nb = a->logbuf[a->lbidx++ % nelem(a->logbuf)];
- if(nb->bp.addr != -1)
- cachedel(nb->bp.addr);
- initblk(nb, blks[i++], -1, Tlog);
- b->logp = nb->bp;
- setflag(b, Bdirty);
+ b->logp = (Bptr){blks[i], -1, -1};
finalize(b);
syncblk(b);
- b = nb;
+ dropblk(b);
+ nlog++;
+ b = mklogblk(a, blks[i++]);
}
p = b->data + b->logsz;
PACK64(p+0, r->off|LogFree);
@@ -463,16 +469,22 @@
PACK64(p+8, r->len);
b->logsz += 16;
}
- finalize(b);
- syncblk(b);
/*
* now we have a valid freelist, and we can start
* appending stuff to it. Clean up the eagerly
* allocated extra blocks.
+ *
+ * Note that we need to drop the reference to the
+ * old logtl before we free the old blocks, because
+ * deallocating a block may require another block.
*/
- a->loghd = hd;
- a->logtl = b;
+ dropblk(a->logtl);
+ a->loghd = (Bptr){blks[0], -1, -1};
+ a->logtl = b; /* written back by sync() later */
+ a->nlog = nlog;
+
+ /* May add blocks to new log */
for(; i < nblks; i++)
blkdealloc_lk(a, blks[i]);
poperror();
@@ -483,8 +495,6 @@
logbarrier(Arena *a, vlong gen)
{
logappend(a, gen<<8, 0, LogSync);
- if(a->loghd.addr == -1)
- a->loghd = a->logtl->bp;
return 0;
}
@@ -535,24 +545,12 @@
static void
blkdealloc_lk(Arena *a, vlong b)
{
+ cachedel(b);
logappend(a, b, Blksz, LogFree);
- if(a->loghd.addr == -1)
- a->loghd = a->logtl->bp;
freerange(a->free, b, Blksz);
a->used -= Blksz;
}
-void
-blkdealloc(vlong b)
-{
- Arena *a;
-
- a = getarena(b);
- qlock(a);
- blkdealloc_lk(a, b);
- qunlock(a);
-}
-
static vlong
blkalloc(int ty, uint hint, int seq)
{
@@ -588,8 +586,6 @@
goto Again;
}
logappend(a, b, Blksz, LogAlloc);
- if(a->loghd.addr == -1)
- a->loghd = a->logtl->bp;
qunlock(a);
poperror();
return b;
@@ -627,7 +623,7 @@
b->data = b->buf + Leafhdsz;
break;
}
- setflag(b, Bdirty);
+ setflag(b, Bdirty, 0);
b->nval = 0;
b->valsz = 0;
b->nbuf = 0;
@@ -676,7 +672,6 @@
return nil;
tracex("dup" , b->bp, b->type, t->gen);
- setflag(r, Bdirty);
r->bp.hash = -1;
r->nval = b->nval;
r->valsz = b->valsz;
@@ -722,15 +717,12 @@
}
b->bp.hash = blkhash(b);
- setflag(b, Bfinal);
- cacheins(b);
- b->cached = getcallerpc(&b);
+ setflag(b, Bdirty|Bfinal, 0);
}
Blk*
getblk(Bptr bp, int flg)
{
- uvlong xh, ck;
Blk *b;
int i;
@@ -741,31 +733,16 @@
nexterror();
}
if((b = cacheget(bp.addr)) != nil){
+ assert(checkflag(b, 0, Bfreed));
b->lasthold = getcallerpc(&bp);
qunlock(&fs->blklk[i]);
poperror();
return b;
}
- b = readblk(bp.addr, flg);
+ b = cachepluck();
b->alloced = getcallerpc(&bp);
- b->bp.hash = blkhash(b);
- if((flg&GBnochk) == 0){
- if(b->type == Tlog || b->type == Tdlist){
- xh = b->logh;
- ck = bufhash(b->data, b->logsz);
- }else{
- xh = bp.hash;
- ck = b->bp.hash;
- }
- if(ck != xh){
- if(flg & GBsoftchk){
- fprint(2, "%s: %ullx %llux != %llux", Ecorrupt, bp.addr, xh, ck);
- error(Ecorrupt);
- }else{
- broke("%s: %ullx %llux != %llux", Ecorrupt, bp.addr, xh, ck);
- }
- }
- }
+ b->alloced = getcallerpc(&bp);
+ readblk(b, bp, flg);
b->bp.gen = bp.gen;
b->lasthold = getcallerpc(&bp);
cacheins(b);
@@ -787,15 +764,16 @@
void
dropblk(Blk *b)
{
- assert(b == nil || b->ref > 0);
- if(b == nil || adec(&b->ref) != 0)
+ if(b == nil)
return;
b->lastdrop = getcallerpc(&b);
+ if(adec(&b->ref) != 0)
+ return;
/*
* freed blocks go to the LRU bottom
* for early reuse.
*/
- if(checkflag(b, Bfreed))
+ if(checkflag(b, Bfreed, 0))
lrubot(b);
else
lrutop(b);
@@ -843,7 +821,7 @@
}
b->freed = getcallerpc(&t);
tracex("freeb", b->bp, getcallerpc(&t), -1);
- setflag(b, Blimbo);
+ setflag(b, Blimbo, 0);
holdblk(b);
assert(b->ref > 1);
limbo(DFblk, b);
@@ -949,13 +927,13 @@
case DFbp:
f = (Bfree*)p;
a = getarena(f->bp.addr);
- qe.op = Qfree;
- qe.bp = f->bp;
- qe.b = nil;
if((b = cacheget(f->bp.addr)) != nil){
- setflag(b, Bfreed);
+ setflag(b, Bfreed, Bdirty|Blimbo);
dropblk(b);
}
+ qe.op = Qfree;
+ qe.bp = f->bp;
+ qe.b = nil;
qput(a->sync, qe);
qlock(&fs->bfreelk);
f->next = fs->bfree;
@@ -965,14 +943,13 @@
break;
case DFblk:
b = (Blk*)p;
- a = getarena(b->bp.addr);
qe.op = Qfree;
qe.bp = b->bp;
qe.b = nil;
- setflag(b, Bfreed);
- clrflag(b, Blimbo);
- qput(a->sync, qe);
+ setflag(b, Bfreed, Bdirty|Blimbo);
+ a = getarena(b->bp.addr);
dropblk(b);
+ qput(a->sync, qe);
break;
default:
abort();
@@ -987,16 +964,18 @@
Arena *a;
Qent qe;
- assert(checkflag(b, Bdirty));
+ assert(checkflag(b, Bdirty, Bqueued|Bstatic));
assert(b->bp.addr >= 0);
+ finalize(b);
+ if(checkflag(b, 0, Bcached)){
+ cacheins(b);
+ b->cached = getcallerpc(&b);
+ }
+ holdblk(b);
b->enqueued = getcallerpc(&b);
- a = getarena(b->bp.addr);
- holdblk(b);
- finalize(b);
traceb("queueb", b->bp);
- setflag(b, Bqueued);
- b->queued = getcallerpc(&b);
+ a = getarena(b->bp.addr);
qe.op = Qwrite;
qe.bp = b->bp;
qe.b = b;
@@ -1011,10 +990,9 @@
q->nheap = 0;
q->heapsz = fs->cmax;
q->heap = emalloc(q->heapsz*sizeof(Qent), 1);
-
}
-int
+static int
qcmp(Qent *a, Qent *b)
{
if(a->qgen != b->qgen)
@@ -1088,7 +1066,7 @@
rwakeup(&q->fullrz);
qunlock(&q->lk);
if(e.b != nil){
- clrflag(e.b, Bqueued);
+ setflag(e.b, 0, Bqueued);
e.b->queued = 0;
}
return e;
@@ -1120,7 +1098,6 @@
assert(qe.b == nil);
a = getarena(qe.bp.addr);
qlock(a);
- cachedel(qe.bp.addr);
blkdealloc_lk(a, qe.bp.addr);
qunlock(a);
break;
@@ -1133,7 +1110,7 @@
break;
case Qwrite:
tracex("qsyncb", qe.bp, qe.qgen, -1);
- if(checkflag(qe.b, Bfreed) == 0)
+ if(checkflag(qe.b, Bfreed, Bstatic) == 0)
syncblk(qe.b);
dropblk(qe.b);
break;
--- a/sys/src/cmd/gefs/cache.c
+++ b/sys/src/cmd/gefs/cache.c
@@ -32,6 +32,7 @@
* its now in use.
*/
assert(b->magic == Magic);
+ assert(checkflag(b, 0, Bstatic));
if(b->ref != 0){
qunlock(&fs->lrulk);
return;
@@ -58,6 +59,7 @@
* its now in use.
*/
assert(b->magic == Magic);
+ assert(checkflag(b, 0, Bstatic));
if(b->ref != 0){
qunlock(&fs->lrulk);
return;
@@ -84,14 +86,11 @@
bkt = &fs->bcache[h % fs->cmax];
qlock(&fs->lrulk);
traceb("cache", b->bp);
- if(checkflag(b, Bcached)){
- qunlock(&fs->lrulk);
- return;
- }
+ assert(checkflag(b, 0, Bstatic|Bcached));
+ setflag(b, Bcached, 0);
assert(b->hnext == nil);
for(Blk *bb = bkt->b; bb != nil; bb = bb->hnext)
- assert(b != bb);
- setflag(b, Bcached);
+ assert(b != bb && b->bp.addr != bb->bp.addr);
b->cached = getcallerpc(&b);
b->hnext = bkt->b;
bkt->b = b;
@@ -98,7 +97,7 @@
qunlock(&fs->lrulk);
}
-void
+static void
cachedel_lk(vlong addr)
{
Bucket *bkt;
@@ -108,16 +107,19 @@
if(addr == -1)
return;
- tracex("uncache", Zb, addr, getcallerpc(&addr));
+ Bptr bp = {addr, -1, -1};
+ tracex("uncache", bp, -1, getcallerpc(&addr));
h = ihash(addr);
bkt = &fs->bcache[h % fs->cmax];
p = &bkt->b;
for(b = bkt->b; b != nil; b = b->hnext){
if(b->bp.addr == addr){
+ /* FIXME: Until we clean up snap.c, we can have dirty blocks in cache */
+ assert(checkflag(b, Bcached, Bstatic)); //Bdirty));
*p = b->hnext;
- clrflag(b, Bcached);
b->uncached = getcallerpc(&addr);
b->hnext = nil;
+ setflag(b, 0, Bcached);
break;
}
p = &b->hnext;
@@ -127,7 +129,8 @@
cachedel(vlong addr)
{
qlock(&fs->lrulk);
- tracex("uncachelk", Zb, addr, getcallerpc(&addr));
+ Bptr bp = {addr, -1, -1};
+ tracex("uncachelk", bp, -1, getcallerpc(&addr));
cachedel_lk(addr);
qunlock(&fs->lrulk);
}
@@ -170,12 +173,12 @@
b = fs->ctail;
assert(b->magic == Magic);
assert(b->ref == 0);
- if(checkflag(b, Bcached))
+ if(checkflag(b, Bcached, 0))
cachedel_lk(b->bp.addr);
- if(checkflag(b, Bcached))
+ if(checkflag(b, Bcached, 0))
fprint(2, "%B cached %#p freed %#p\n", b->bp, b->cached, b->freed);
+ assert(checkflag(b, 0, Bcached));
lrudel(b);
- assert(!checkflag(b, Bcached));
b->flag = 0;
b->lasthold = 0;
b->lastdrop = 0;
--- a/sys/src/cmd/gefs/check.c
+++ b/sys/src/cmd/gefs/check.c
@@ -158,6 +158,7 @@
fprint(fd, "error loading %B\n", bp);
return 0;
}
+traceb("chklg", bp);
b = getblk(bp, 0);
nb = b->logp;
dropblk(b);
--- a/sys/src/cmd/gefs/dat.h
+++ b/sys/src/cmd/gefs/dat.h
@@ -115,6 +115,7 @@
Bcached = 1 << 3,
Bqueued = 1 << 4,
Blimbo = 1 << 5,
+ Bstatic = 1 << 6,
};
enum {
@@ -540,7 +541,6 @@
Syncq syncq[32];
-
int fd;
long rdonly;
int noauth;
@@ -587,7 +587,6 @@
Avltree *free;
Blk **queue;
int nqueue;
- int lbidx;
Blk *logbuf[2]; /* preallocated log pages */
Blk *h0; /* arena header */
Blk *h1; /* arena footer */
@@ -597,7 +596,7 @@
vlong used;
vlong reserve;
/* allocation log */
- vlong nlog; /* logged since last copression */
+ vlong nlog; /* number of blocks in log */
Bptr loghd; /* allocation log */
Blk *logtl; /* end of the log, open for writing */
Syncq *sync;
@@ -747,6 +746,7 @@
/* debug */
uintptr queued;
uintptr lasthold;
+ uintptr lasthold0;
uintptr lastdrop;
uintptr enqueued;
uintptr cached;
--- a/sys/src/cmd/gefs/fns.h
+++ b/sys/src/cmd/gefs/fns.h
@@ -66,7 +66,6 @@
int logbarrier(Arena *, vlong);
void dlappend(Dlist *dl, Bptr);
void killblk(Tree*, Bptr);
-void blkdealloc(vlong);
ushort blkfill(Blk*);
uvlong blkhash(Blk*);
uvlong bufhash(void*, usize);
@@ -88,6 +87,7 @@
void loadarena(Arena*, Bptr);
void loadfs(char*);
void loadlog(Arena*, Bptr);
+void flushlog(Arena*);
int scandead(Dlist*, int, void(*)(Bptr, void*), void*);
int endfs(void);
void compresslog(Arena*);
@@ -108,9 +108,8 @@
int btnext(Scan*, Kvp*);
void btexit(Scan*);
-int checkflag(Blk *b, int);
-void setflag(Blk *b, int);
-void clrflag(Blk *b, int);
+int checkflag(Blk *b, int, int);
+void setflag(Blk *b, int, int);
char* estrdup(char*);
--- a/sys/src/cmd/gefs/fs.c
+++ b/sys/src/cmd/gefs/fs.c
@@ -73,7 +73,6 @@
Dlist dl;
int i;
-
qlock(&fs->synclk);
if(waserror()){
fprint(2, "failed to sync: %s\n", errmsg());
@@ -119,15 +118,12 @@
* get reused.
*/
logbarrier(a, fs->qgen);
- finalize(a->logtl);
- syncblk(a->logtl);
+ flushlog(a);
packarena(a->h0->data, Blksz, a);
packarena(a->h1->data, Blksz, a);
finalize(a->h0);
finalize(a->h1);
- setflag(a->h0, Bdirty);
- setflag(a->h1, Bdirty);
fs->arenabp[i] = a->h0->bp;
qunlock(a);
}
@@ -137,8 +133,6 @@
packsb(fs->sb1->buf, Blksz, fs);
finalize(fs->sb0);
finalize(fs->sb1);
- setflag(fs->sb0, Bdirty);
- setflag(fs->sb1, Bdirty);
fs->snap.dirty = 0;
qunlock(&fs->mutlk);
@@ -2477,15 +2471,12 @@
ainc(&fs->rdonly);
for(i = 0; i < fs->narena; i++){
a = &fs->arenas[i];
- oldhd[i].addr = -1;
- oldhd[i].hash = -1;
- oldhd[i].gen = -1;
+ oldhd[i] = Zb;
qlock(a);
/*
* arbitrary heuristic -- 10% of our reserved
* space seems like a fine time to compress.
*/
- if(0) /* FIXME: reenable */
if(a->nlog >= a->reserve/(10*Blksz)){
oldhd[i] = a->loghd;
epochstart(id);
--- a/sys/src/cmd/gefs/load.c
+++ b/sys/src/cmd/gefs/load.c
@@ -54,6 +54,12 @@
unpackarena(a, b->data, Arenasz);
if((a->free = avlcreate(rangecmp)) == nil)
error(Enomem);
+ a->logbuf[0] = cachepluck();
+ a->logbuf[1] = cachepluck();
+ a->logbuf[0]->bp = (Bptr){-1, -1, -1};
+ a->logbuf[1]->bp = (Bptr){-1, -1, -1};
+ setflag(a->logbuf[0], Bstatic, 0);
+ setflag(a->logbuf[1], Bstatic, 0);
a->h0 = h0;
a->h1 = h1;
a->used = a->size;
@@ -117,10 +123,6 @@
}
for(i = 0; i < fs->narena; i++){
a = &fs->arenas[i];
- a->logbuf[0] = cachepluck();
- a->logbuf[1] = cachepluck();
- a->logbuf[0]->bp = (Bptr){-1, -1, -1};
- a->logbuf[1]->bp = (Bptr){-1, -1, -1};
loadlog(a, a->loghd);
}
--- a/sys/src/cmd/gefs/ream.c
+++ b/sys/src/cmd/gefs/ream.c
@@ -168,6 +168,7 @@
char *p;
b = cachepluck();
+
addr = hdaddr+2*Blksz; /* leave room for arena hdr */
a->loghd.addr = -1;
@@ -180,7 +181,7 @@
b->logsz = 0;
b->logp = (Bptr){-1, -1, -1};
b->data = b->buf + Loghdsz;
- setflag(b, Bdirty);
+ setflag(b, Bdirty, 0);
p = b->buf + Loghdsz;
b->logp = (Bptr){-1, -1, -1};
@@ -206,21 +207,18 @@
h0->type = Tarena;
h0->bp.addr = hdaddr;
h0->data = h0->buf+2;
+ packarena(h0->data, Arenasz, a);
finalize(h0);
+ syncblk(h0);
+ a->h0 = h0;
memset(h1->buf, 0, sizeof(h1->buf));
h1->type = Tarena;
h1->bp.addr = hdaddr+Blksz;
h1->data = h1->buf+2;
- finalize(h1);
-
- packarena(h0->data, Arenasz, a);
packarena(h1->data, Arenasz, a);
- finalize(h0);
finalize(h1);
- syncblk(h0);
syncblk(h1);
- a->h0 = h0;
a->h1 = h1;
}
--- a/sys/src/cmd/gefs/snap.c
+++ b/sys/src/cmd/gefs/snap.c
@@ -606,7 +606,7 @@
}
p = dl->ins->data + dl->ins->logsz;
dl->ins->logsz += 8;
- setflag(dl->ins, Bdirty);
+ setflag(dl->ins, Bdirty, 0);
PACK64(p, bp.addr);
poperror();
putdl(dl);
--
⑨