ref: 4714988ecac57b015fc0c93c65b3c8e3ab25600c
parent: 452c9c92908ef863d29da08345e0b5747a99d411
author: Ori Bernstein <ori@eigenstate.org>
date: Wed Nov 1 23:49:40 EDT 2023
blk: unify log and deadlist formats
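
Log blocks and deadlist blocks now share a single on-disk header
and chain through a block pointer stored in that header, instead
of through LogChain/LogEnd entries embedded in the log body; the
Dlhdsz header and the LogChain/LogEnd ops go away. Roughly, the
shared header (Loghdsz = 2+2+8+Ptrsz) is laid out as:

	@0	type	Tlog or Tdlist
	@2	logsz	length of the log body
	@4	logh	hash of the log body
	@12	logp	next block in the chain

Because the hash is stored in the header and covers only the
first logsz bytes, a block can be appended to in place and still
be verified when read back, so deadlists no longer need to be
read with GBnochk.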
--- a/blk.c
+++ b/blk.c
@@ -3,12 +3,6 @@
#include <fcall.h>
#include <avl.h>
-typedef struct Range Range;
-struct Range {
- vlong off;
- vlong len;
-};
-
#include "dat.h"
#include "fns.h"
#include "atomic.h"
@@ -115,14 +109,10 @@
b->data = p;
break;
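+	/* dlists now share the log block header format */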
case Tdlist:
- b->deadsz = UNPACK16(p); p += 2;
- b->deadp = unpackbp(p, Ptrsz); p += Ptrsz;
- assert(p - b->buf == Dlhdsz);
- b->data = p;
- break;
case Tlog:
b->logsz = UNPACK16(p); p += 2;
- b->loghash = UNPACK64(p); p += 8;
+ b->logh = UNPACK64(p); p += 8;
+ b->logp = unpackbp(p, Ptrsz); p += Ptrsz;
assert(p - b->buf == Loghdsz);
b->data = p;
break;
@@ -251,7 +241,6 @@
if((lb = cachepluck()) == nil)
return nil;
initblk(lb, o, -1, Tlog);
- lb->logsz = Loghashsz;
finalize(lb);
if(syncblk(lb) == -1){
dropblk(lb);
@@ -261,15 +250,13 @@
}
static int
-chainlog(Blk *pb, vlong o)
+chainlog(Blk *pb, Bptr nb)
{
- char *p;
-
if(pb == nil)
return 0;
assert(pb->type == Tlog);
- p = pb->data + pb->logsz;
- PACK64(p, o|LogChain);
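+	/* the chain now lives in the block header, not in a LogChain entry */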
+ setflag(pb, Bdirty);
+ pb->logp = nb;
finalize(pb);
if(syncblk(pb) == -1){
dropblk(pb);
@@ -288,11 +275,10 @@
static int
logappend(Arena *a, vlong off, vlong len, int op)
{
- vlong o, ao;
+ vlong o;
Blk *nl, *lb;
char *p, *name;
- nl = nil;
lb = a->logtl;
assert((off & 0xff) == 0);
assert(op == LogAlloc || op == LogFree || op == LogSync);
@@ -305,14 +291,13 @@
case LogAlloc: name = "alloc"; break;
case LogFree: name = "free"; break;
case LogSync: name = "sync"; break;
- case LogChain: name = "chain"; break;
default: name = "???"; break;
}
- assert(lb == nil || lb->logsz > 0);
+ assert(lb == nil || lb->logsz >= 0);
dprint("logop %llx+%llx@%x: %s\n", off, len, lb?lb->logsz:-1, name);
/*
* move to the next block when we have
- * 40 bytes in the log:
+ * too little room in the log:
* We're appending up to 16 bytes as
* part of the operation, followed by
* 16 bytes of new log entry allocation
@@ -323,13 +308,17 @@
return -1;
if((nl = mklogblk(o)) == nil)
return -1;
- if(chainlog(lb, nl->bp.addr) == -1)
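+	/* record the new tail's allocation in the old tail, then chain to it */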
+ p = lb->data + lb->logsz;
+ PACK64(p, o|LogAlloc1);
+ lb->logsz += 8;
+ if(chainlog(lb, nl->bp) == -1)
return -1;
- lb = nl;
- a->logtl = lb;
+ a->logtl = nl;
a->nlog++;
+ lb = nl;
}
+ setflag(lb, Bdirty);
if(len == Blksz){
if(op == LogAlloc)
op = LogAlloc1;
@@ -344,25 +333,7 @@
PACK64(p+8, len);
lb->logsz += 8;
}
- /*
- * The newly allocated log block needs
- * to be recorded. If we're compressing
- * a log, it needs to go to the tail of
- * the new log, rather than after the
- * current allocation. so that we don't
- * reorder allocs and frees.
- */
- if(nl != nil){
- p = lb->data + lb->logsz;
- ao = nl->bp.addr|LogAlloc1;
- PACK64(p, ao);
- lb->logsz += 8;
- }
- /* this gets overwritten by the next append */
- p = lb->data + lb->logsz;
- PACK64(p, (uvlong)LogEnd);
return 0;
-
}
int
@@ -375,126 +346,80 @@
dprint("loadlog %B\n", bp);
-Nextblk:
- if((b = getblk(bp, GBnochk)) == nil)
- return -1;;
- /* the hash covers the log and offset */
- if(b->loghash != bufhash(b->data, Logspc)){
- werrstr("corrupt log block %B <%llx> [%llx] <%llx>",
- bp, b->loghash, blkhash(b), bufhash(b->data, Logspc));
- return -1;
- }
- for(i = Loghashsz; i < Logspc; i += n){
- d = b->data + i;
- ent = UNPACK64(d);
- op = ent & 0xff;
- off = ent & ~0xff;
- n = (op >= Log2wide) ? 16 : 8;
- switch(op){
- case LogEnd:
- dprint("\tlog@%d: end\n", i);
- if(a->logtl == nil)
- a->logtl = holdblk(b);
- dropblk(b);
- return 0;
- case LogSync:
- gen = ent >> 8;
- dprint("\tlog@%d: sync %llx\n", i, gen);
- if(gen >= fs->qgen){
- if(a->logtl == nil){
- b->logsz = i;
- a->logtl = holdblk(b);
+ while(1){
+ if((b = getblk(bp, 0)) == nil)
+ return -1;
+ dprint("\tload %B chain %B\n", bp, b->logp);
+		/* the hash was already verified by getblk */
+ for(i = 0; i < b->logsz; i += n){
+ d = b->data + i;
+ ent = UNPACK64(d);
+ op = ent & 0xff;
+ off = ent & ~0xff;
+ n = (op >= Log2wide) ? 16 : 8;
+ switch(op){
+ case LogSync:
+ gen = ent >> 8;
+ dprint("\tlog@%d: sync %llx\n", i, gen);
+ if(gen >= fs->qgen){
+ if(a->logtl == nil){
+ b->logsz = i;
+ a->logtl = holdblk(b);
+ return 0;
+ }
+ dropblk(b);
return 0;
}
- dropblk(b);
- return 0;
+ break;
+
+ case LogAlloc:
+ case LogAlloc1:
+ len = (op >= Log2wide) ? UNPACK64(d+8) : Blksz;
+ dprint("\tlog@%d alloc: %llx+%llx\n", i, off, len);
+ if(grabrange(a->free, off & ~0xff, len) == -1)
+ return -1;
+ a->used += len;
+ break;
+ case LogFree:
+ case LogFree1:
+ len = (op >= Log2wide) ? UNPACK64(d+8) : Blksz;
+ dprint("\tlog@%d free: %llx+%llx\n", i, off, len);
+ if(freerange(a->free, off & ~0xff, len) == -1){
+ werrstr("invalid free: %r");
+ return -1;
+ }
+ a->used -= len;
+ break;
+ default:
+ n = 0;
+ dprint("\tlog@%d: log op %d\n", i, op);
+ abort();
+ break;
}
- break;
- case LogChain:
- bp.addr = off & ~0xff;
- bp.hash = -1;
- bp.gen = -1;
- dropblk(b);
- dprint("\tlog@%d: chain %B\n", i, bp);
- goto Nextblk;
- break;
-
- case LogAlloc:
- case LogAlloc1:
- len = (op >= Log2wide) ? UNPACK64(d+8) : Blksz;
- dprint("\tlog@%d alloc: %llx+%llx\n", i, off, len);
- if(grabrange(a->free, off & ~0xff, len) == -1)
- return -1;
- a->used += len;
- break;
- case LogFree:
- case LogFree1:
- len = (op >= Log2wide) ? UNPACK64(d+8) : Blksz;
- dprint("\tlog@%d free: %llx+%llx\n", i, off, len);
- if(freerange(a->free, off & ~0xff, len) == -1)
- return -1;
- a->used -= len;
- break;
- default:
- n = 0;
- dprint("\tlog@%d: log op %d\n", i, op);
- abort();
- break;
}
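+		/* no chain: this block is the tail of the log */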
+ if(b->logp.addr == -1){
+ a->logtl = b;
+ return 0;
+ }
+ bp = b->logp;
+ dropblk(b);
}
- return -1;
}
int
compresslog(Arena *a)
{
- vlong v, ba, na, nl, sz;
- vlong graft, oldhd, *log;
- int i, n, nr;
- Blk *b, *hd, *tl;
- Range *rng;
+ int i, nr, nblks;
+ vlong sz, *blks;
+ Blk *b, *nb;
Arange *r;
- Bptr bp;
+ Bptr hd;
char *p;
+ if(a->logtl != nil && checkflag(a->logtl, Bdirty))
+ enqueue(a->logtl);
/*
- * Sync the current log to disk, and
- * set up a new block log tail. While
- * compressing the log, nothing else is
- * using this arena, so any allocs come
- * from the log compression, and go into
- * this new log segment.
- *
- * A bit of copy paste from newblk,
- * because otherwise we have a deadlock
- * allocating the block.
- */
- if((ba = blkalloc_lk(a, 1)) == -1)
- return -1;
- if((b = cachepluck()) == nil)
- return -1;
- initblk(b, ba, -1, Tlog);
- b->logsz = Loghashsz;
-
- p = b->data + b->logsz;
- PACK64(p, (uvlong)LogEnd);
- finalize(b);
- if(syncblk(b) == -1){
- dropblk(b);
- return -1;
- }
-
- graft = b->bp.addr;
- if(a->logtl != nil){
- finalize(a->logtl);
- if(syncblk(a->logtl) == -1){
- dropblk(b);
- return -1;
- }
- }
- a->logtl = b;
-
- /*
* Prepare what we're writing back.
* Arenas must be sized so that we can
* keep the merged log in memory for
@@ -502,101 +427,68 @@
*/
sz = 0;
nr = 0;
+ a->nlog = 0;
for(r = (Arange*)avlmin(a->free); r != nil; r = (Arange*)avlnext(r)){
- sz += (r->len == Blksz) ? 8 : 16;
+ sz += 16;
nr++;
}
- nl = (sz+Logspc-Logslop-1)/(Logspc - Logslop);
- if((log = malloc(nl*sizeof(vlong))) == nil)
+
+ /*
+ * Make a pessimistic estimate of the number of blocks
+ * needed to store the ranges, as well as the blocks
+ * used to store the range allocations.
+ *
+ * This does modify the tree, but it's safe because
+ * we can only be removing entries from the tree, not
+ * splitting or inserting new ones.
+ */
+	nblks = (sz+Logspc)/(Logspc-Logslop) + 16*nr/(Logspc-Logslop) + 1;
+ if((blks = calloc(nblks, sizeof(vlong))) == nil)
return -1;
- if((rng = malloc(nr*sizeof(Range))) == nil){
- free(log);
- return -1;
- }
- for(i = 0; i < nl; i++){
- if((log[i] = blkalloc_lk(a, 1)) == -1){
- free(log);
- free(rng);
+ for(i = 0; i < nblks; i++){
+ blks[i] = blkalloc_lk(a, 1);
+		if(blks[i] == -1){
+			free(blks);
			return -1;
+		}
-	}
}
- nr = 0;
- for(r = (Arange*)avlmin(a->free); r != nil; r = (Arange*)avlnext(r)){
- rng[nr].off = r->off;
- rng[nr].len = r->len;
- nr++;
- }
- n = 0;
- hd = nil;
- tl = nil;
- for(i = 0; i < nr; i++){
- if(tl == nil || tl->logsz >= Logspc - Logslop){
- if((tl = mklogblk(log[n++])) == nil)
- return -1;
- if(chainlog(tl, tl->bp.addr) == -1){
- free(rng);
- free(log);
- return -1;
- }
- if(hd == nil)
- hd = tl;
- p = tl->data + tl->logsz;
- }
- if(rng[i].len == Blksz){
- PACK64(p+0, rng[i].off | LogFree1);
- tl->logsz += 8;
- p += 8;
- }else{
- PACK64(p+0, rng[i].off | LogFree);
- PACK64(p+8, rng[i].len);
- tl->logsz += 16;
- p += 16;
- }
- }
- PACK64(p, LogChain|graft);
- free(log);
- free(rng);
- finalize(tl);
- if(syncblk(tl) == -1)
+ /* fill up the log with the ranges from the tree */
+ i = 0;
+ hd = (Bptr){blks[0], -1, -1};
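+	/* log blocks carry their hash in the header, so hd's hash and gen stay unset */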
+	if((b = cachepluck()) == nil){
+		free(blks);
		return -1;
+	}
-
- oldhd = a->loghd.addr;
- a->loghd.addr = hd->bp.addr;
- a->loghd.hash = hd->bp.hash;
- a->loghd.gen = -1;
-
- if(oldhd != -1){
- for(ba = oldhd; ba != -1 && ba != graft; ba = na){
- na = -1;
- bp.addr = ba;
- bp.hash = -1;
- bp.gen = -1;
- if((b = getblk(bp, GBnochk)) == nil)
+ initblk(b, blks[i++], -1, Tlog);
+ for(r = (Arange*)avlmin(a->free); r != nil; r = (Arange*)avlnext(r)){
+ if(b->logsz >= Logspc - Logslop){
+			if((nb = cachepluck()) == nil){
+				free(blks);
				return -1;
+			}
- for(i = Loghashsz; i < Logspc; i += n){
- p = b->data + i;
- v = UNPACK64(p);
- n = ((v&0xff) >= Log2wide) ? 16 : 8;
- if((v&0xff) == LogChain){
- na = v & ~0xff;
- break;
- }else if((v&0xff) == LogEnd){
- na = -1;
- break;
- }
- }
- cachedel(b->bp.addr);
- if(blkdealloc_lk(a, ba) == -1)
- return -1;
+ a->nlog++;
+ initblk(nb, blks[i++], -1, Tlog);
+			if(chainlog(b, nb->bp) == -1){
+				free(blks);
+				return -1;
+			}
+ enqueue(b);
dropblk(b);
+ b = nb;
}
+ p = b->data + b->logsz;
+ PACK64(p+0, r->off|LogFree);
+ PACK64(p+8, r->len);
+ b->logsz += 16;
}
- a->nlog = 0;
- finalize(a->logtl);
- if(syncblk(a->logtl) == -1)
- return -1;
- return 0;
+ finalize(b);
+ enqueue(b);
+
+	/*
+	 * Now we have a valid freelist and can start
+	 * appending to it; clean up the old logs and
+	 * the eagerly allocated spare blocks.
+	 */
+ dropblk(a->logtl);
+ a->loghd = hd;
+ a->logtl = b;
+	for(; i < nblks; i++){
+		cachedel(blks[i]);
+		blkdealloc_lk(a, blks[i]);
+	}
+	free(blks);
+	return 0;
}
int
@@ -655,6 +547,8 @@
int r;
r = -1;
+	assert(b >= a->hd->bp.addr + Blksz);
+	assert(b < a->tl->bp.addr);
if(logappend(a, b, Blksz, LogFree) == -1)
return -1;
if(a->loghd.addr == -1)
@@ -744,11 +638,9 @@
b->data = b->buf;
break;
case Tdlist:
- b->deadsz = 0;
- b->data = b->buf + Dlhdsz;
- break;
case Tlog:
b->logsz = 0;
+ b->logp = (Bptr){-1, -1, -1};
b->data = b->buf + Loghdsz;
break;
case Tpivot:
@@ -833,13 +725,11 @@
PACK16(b->buf+4, b->valsz);
break;
case Tdlist:
- PACK16(b->buf+2, b->deadsz);
- packbp(b->buf+4, Ptrsz, &b->deadp);
- break;
case Tlog:
- b->loghash = bufhash(b->data, Logspc);
+ b->logh = bufhash(b->data, b->logsz);
PACK16(b->buf+2, b->logsz);
- PACK64(b->buf+4, b->loghash);
+ PACK64(b->buf+4, b->logh);
+ packbp(b->buf+12, Ptrsz, &b->logp);
break;
case Tdat:
case Tarena:
@@ -850,12 +740,14 @@
b->bp.hash = blkhash(b);
setflag(b, Bfinal);
cacheins(b);
+ b->cached = getcallerpc(&b);
}
Blk*
getblk(Bptr bp, int flg)
{
- uvlong h;
+ uvlong xh, ck;
+ char *t;
Blk *b;
int i;
@@ -870,13 +762,24 @@
return nil;
}
b->alloced = getcallerpc(&bp);
- h = blkhash(b);
- if((flg&GBnochk) == 0 && h != bp.hash){
- fprint(2, "corrupt block %p %B: %.16llux != %.16llux\n", b, bp, h, bp.hash);
- qunlock(&fs->blklk[i]);
- return nil;
+ b->bp.hash = blkhash(b);
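+	/*
+	 * log and dlist blocks get modified in place, so their
+	 * hash is stored in the header and covers only the body.
+	 */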
+ if((flg&GBnochk) == 0){
+ if(b->type == Tlog || b->type == Tdlist){
+ t = "log";
+ xh = b->logh;
+ ck = bufhash(b->data, b->logsz);
+ }else{
+ t = "block";
+ xh = bp.hash;
+ ck = b->bp.hash;
+ }
+ if(ck != xh){
+ fprint(2, "corrupt %s %p %B: %.16llux != %.16llux\n",
+ t, b, bp, xh, ck);
+ qunlock(&fs->blklk[i]);
+ return nil;
+ }
}
- b->bp.hash = h;
b->bp.gen = bp.gen;
cacheins(b);
qunlock(&fs->blklk[i]);
--- a/dat.h
+++ b/dat.h
@@ -72,10 +72,7 @@
Pivhdsz = 10,
Leafhdsz = 6,
- Loghdsz = 12, /* type, len, hash */
- Loghashsz = 8,
- Dlhdsz = 2+2+Ptrsz, /* type, size, chain */
- Dlspc = Blksz - Dlhdsz,
+ Loghdsz = 2+2+8+Ptrsz, /* type, len, hash, chain */
Rootsz = 4+Ptrsz, /* root pointer */
Pivsz = Blksz - Pivhdsz,
Bufspc = (Blksz - Pivhdsz)/2, /* pivot room */
@@ -299,9 +296,7 @@
/* 1-wide entries */
LogAlloc1, /* alloc a block */
LogFree1, /* free a block */
- LogChain, /* point to next log block */
LogSync, /* sync barrier for replay */
- LogEnd, /* end of log */
/* 2-wide entries */
#define Log2wide LogAlloc
@@ -696,11 +691,8 @@
};
struct {
int logsz; /* @2 for allocation log */
- vlong loghash; /* @6 for log */
- };
- struct {
- int deadsz; /* @2 size of deadlist */
- Bptr deadp; /* @4 next deadlist chain */
+ uvlong logh; /* @4 for log body hash */
+		Bptr	logp;	/* @12 next block in chain */
};
};
--- a/dump.c
+++ b/dump.c
@@ -495,11 +495,11 @@
fprint(fd, "broken: %B\n", hd);
break;
}
- fprint(fd, "deadsz: %xm deadp=%B\n", b->deadsz, b->deadp);
- e = b->data + b->deadsz;
+ fprint(fd, "logsz: %xm logp=%B\n", b->logsz, b->logp);
+ e = b->data + b->logsz;
for(p = b->data; p != e; p += 8)
fprint(fd, "\tdead: %llx\n", UNPACK64(p));
- hd = b->deadp;
+ hd = b->logp;
dropblk(b);
}
}
--- a/fs.c
+++ b/fs.c
@@ -2292,7 +2292,7 @@
void
runtasks(int id, void *)
{
-// int i, c;
+ int i, c;
Amsg *a;
while(1){
@@ -2317,19 +2317,18 @@
* 1/4 of our reserved emergency space seems like a good
* heuristic for big, but it was picked arbitrarily.
*/
-USED(id);
-// qlock(&fs->synclk);
-// for(i = 0; i < fs->narena; i++){
-// epochstart(id);
-// qlock(&fs->arenas[i]);
-// c = fs->arenas[i].nlog > fs->arenas[i].reserve/(4*Blksz);
-// if(c){
-// if(compresslog(&fs->arenas[i]) == -1)
-// fprint(2, "compress log: %r");
-// }
-// qunlock(&fs->arenas[i]);
-// epochend(id);
-// }
-// qunlock(&fs->synclk);
+ qlock(&fs->synclk);
+ for(i = 0; i < fs->narena; i++){
+ epochstart(id);
+ qlock(&fs->arenas[i]);
+ c = fs->arenas[i].nlog > fs->arenas[i].reserve/(4*Blksz);
+ if(c){
+ if(compresslog(&fs->arenas[i]) == -1)
+ fprint(2, "compress log: %r");
+ }
+ qunlock(&fs->arenas[i]);
+ epochend(id);
+ }
+ qunlock(&fs->synclk);
}
}
--- a/ream.c
+++ b/ream.c
@@ -165,8 +165,10 @@
Blk *b, *hd, *tl;
b = cachepluck();
- if(start == 512*MiB)
+ if(start == 512*MiB){
start += Blksz;
+ asz -= Blksz;
+ }
addr = start+Blksz; /* leave room for arena hdr */
a->loghd.addr = -1;
@@ -175,13 +177,18 @@
memset(b->buf, 0, sizeof(b->buf));
b->type = Tlog;
- b->bp.addr = addr;
- b->data = b->buf + Loghdsz;
- if(b->bp.addr == 512*MiB)
+ b->bp.addr = addr+Blksz;
+ if(b->bp.addr == 512*MiB){
b->bp.addr += Blksz;
+ asz -= Blksz;
+ }
+ b->logsz = 0;
+ b->logp = (Bptr){-1, -1, -1};
+ b->data = b->buf + Loghdsz;
setflag(b, Bdirty);
- p = b->data + Loghashsz;
+ p = b->buf + Loghdsz;
PACK64(p, addr|LogFree); p += 8; /* addr */
PACK64(p, asz-Blksz); p += 8; /* len */
PACK64(p, b->bp.addr|LogAlloc); p += 8; /* addr */
@@ -192,7 +199,6 @@
p += 8;
}
PACK64(p, (uvlong)LogSync); p += 8; /* barrier */
- PACK64(p, (uvlong)LogEnd); /* done */
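+	/* no LogEnd entry needed: logsz in the header bounds the log */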
b->logsz = p - b->data;
finalize(b);
if(syncblk(b) == -1)
@@ -207,7 +213,6 @@
a->loghd.gen = -1;
a->size = asz;
a->used = Blksz;
- a->logtl = nil;
hd = cachepluck();
tl = cachepluck();
@@ -300,8 +305,6 @@
sysfatal("ream: loadarena: %r");
if(loadlog(a, a->loghd) == -1)
sysfatal("load log: %r");
- if(compresslog(a) == -1)
- sysfatal("compress log: %r");
}
if((mb = newblk(mnt->root, Tleaf)) == nil)
--- a/snap.c
+++ b/snap.c
@@ -154,11 +154,10 @@
bp = dl->hd;
while(bp.addr != -1){
- /* ugly: when we merge deadlists, we change their hash */
- if((b = getblk(bp, GBnochk)) == nil)
+ if((b = getblk(bp, 0)) == nil)
return Efs;
if(docontents){
- for(p = b->data; p != b->data+b->deadsz; p += 8){
+ for(p = b->data; p != b->data+b->logsz; p += 8){
fb.addr = UNPACK64(p);
fb.hash = -1;
fb.gen = -1;
@@ -165,7 +164,7 @@
freeblk(nil, nil, fb);
}
}
- bp = b->deadp;
+ bp = b->logp;
freeblk(&fs->snap, b, b->bp);
dropblk(b);
}
@@ -194,8 +193,7 @@
m->ins = d->ins;
}else{
m->hd = d->hd;
- /* ugly: when we merge deadlists, we change their hash */
- if((b = getblk(d->tl, GBnochk)) == nil)
+ if((b = getblk(d->tl, 0)) == nil)
goto Out;
msg[0].op = Odelete;
dlist2kv(d, &msg[0], buf[0], sizeof(buf[0]));
@@ -209,11 +207,11 @@
dropblk(b);
// TODO: merge
-// if(m->ins->deadsz + d->ins->deadsz < Dlspc){
+// if(m->ins->logsz + d->ins->logsz < Dlspc){
// p = d->ins->data;
-// q = m->ins->data + m->ins->deadsz;
-// for(i = 0; i < d->ins->deadsz; i += 8){
-// m->ins->deadsz += 8;
+// q = m->ins->data + m->ins->logsz;
+// for(i = 0; i < d->ins->logsz; i += 8){
+// m->ins->logsz += 8;
// x = UNPACK64(p);
// PACK64(q, x);
// p += 8;
@@ -545,7 +543,7 @@
dl = &fs->snapdl;
else if((dl = getdl(t->gen, bp.gen)) == nil)
return -1;
- if(dl->ins == nil || Dlspc - dl->ins->deadsz < 8){
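+	/* dlists now use the log block format and its space accounting */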
+ if(dl->ins == nil || Logspc - dl->ins->logsz < Logslop){
if((b = newblk(&fs->snap, Tdlist)) == nil){
putdl(dl);
return -1;
@@ -555,18 +553,18 @@
/* enqueuing updates the hashes */
if(dl->ins->bp.addr == dl->tl.addr)
dl->tl = dl->ins->bp;
- b->deadp = dl->ins->bp;
+ b->logp = dl->ins->bp;
}else{
dl->tl = b->bp;
- b->deadp = (Bptr){-1, -1, -1};
+ b->logp = (Bptr){-1, -1, -1};
}
cacheins(b);
dl->hd = b->bp;
dl->ins = b;
}
- p = dl->ins->data + dl->ins->deadsz;
+ p = dl->ins->data + dl->ins->logsz;
dl->ins->flag |= Bdirty;
- dl->ins->deadsz += 8;
+ dl->ins->logsz += 8;
PACK64(p, bp.addr);
if(t != &fs->snap)
putdl(dl);