ref: ccbf2ae6831a064e4115aa0ad2181b304dfaeebc
parent: 8585477a0d205a33c9d89a69f73c7ebf4727cd72
author: Ori Bernstein <ori@eigenstate.org>
date: Mon Nov 13 22:46:30 EST 2023
blk: preallocate log blocks to avoid deadlocks

If we have no blocks left to pluck, the free ops that we have
queued can get stuck, and we never end up with new blocks
available for plucking; this change allocates the log blocks at
FS startup, so the sync proc can never be left waiting for
blocks to get freed.
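
The pattern, sketched outside of gefs (Ring, ringinit, and
ringnext are illustrative names, not gefs API): reserve a fixed
set of buffers while allocation is still guaranteed to succeed,
then hand them out round robin, so the log writer never has to
call back into an allocator that may itself be waiting on the
log. The sketch assumes, as the patch does, that each buffer has
been written out before the ring wraps back around to it.

	#include <u.h>
	#include <libc.h>

	/* illustrative only: a fixed ring of buffers reserved up front */
	typedef struct Ring Ring;
	struct Ring {
		char	*buf[2];	/* reserved at startup */
		int	idx;		/* next buffer to hand out */
	};

	/* allocate while memory is still plentiful, e.g. at fs startup */
	int
	ringinit(Ring *r, long bufsz)
	{
		int i;

		for(i = 0; i < nelem(r->buf); i++)
			if((r->buf[i] = malloc(bufsz)) == nil)
				return -1;
		r->idx = 0;
		return 0;
	}

	/* never blocks or fails: hands back a reserved buffer, round robin */
	char*
	ringnext(Ring *r)
	{
		return r->buf[r->idx++ % nelem(r->buf)];
	}

mklogblk below draws from a->logbuf the same way, so syncing the
allocation log can make progress even when the cache has nothing
left to pluck.
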
--- a/blk.c
+++ b/blk.c
@@ -234,18 +234,15 @@
 }
 
 static Blk*
-mklogblk(vlong o)
+mklogblk(Arena *a, vlong o)
 {
 	Blk *lb;
 
-	if((lb = cachepluck()) == nil)
-		return nil;
+	lb = a->logbuf[a->lbidx++ % nelem(a->logbuf)];
 	initblk(lb, o, -1, Tlog);
 	finalize(lb);
-	if(syncblk(lb) == -1){
-		dropblk(lb);
+	if(syncblk(lb) == -1)
 		return nil;
-	}
 	return lb;
 }
 
@@ -289,7 +286,7 @@
 	if(lb == nil || lb->logsz >= Logspc - Logslop){
 		if((o = blkalloc_lk(a, 1)) == -1)
 			return -1;
-		if((nl = mklogblk(o)) == nil)
+		if((nl = mklogblk(a, o)) == nil)
 			return -1;
 		p = lb->data + lb->logsz;
 		PACK64(p, o|LogAlloc1);
@@ -301,7 +298,6 @@
 			fs->broken = 1;
 			return -1;
 		}
-		dropblk(lb);
 		a->logtl = nl;
 		a->nlog++;
 		lb = nl;
--- a/dat.h
+++ b/dat.h
@@ -517,6 +517,7 @@
 	long	epoch;
 	long	lepoch[32];
 	Bfree	*limbo[3];
+	long	nlimbo;
 
 	Syncq	syncq[32];
 
@@ -565,9 +566,11 @@
 	Avltree	*free;
 	Blk	**queue;
 	int	nqueue;
-	Blk	*hd;		/* arena header */
-	Blk	*tl;		/* arena footer */
-	Blk	**q;		/* write queue */
+	int	lbidx;
+	Blk	*logbuf[2];	/* preallocated log pages */
+	Blk	*hd;		/* arena header */
+	Blk	*tl;		/* arena footer */
+	Blk	**q;		/* write queue */
 	vlong	nq;
 	vlong	size;
 	vlong	used;
@@ -598,6 +601,8 @@
 	Key;
 	Xdir;
 	Dent	*next;
+	QLock	trunclk;
+	int	truncating;
 	vlong	up;
 	long	ref;
 	char	gone;
--- a/load.c
+++ b/load.c
@@ -102,6 +102,8 @@
 	}
 	for(i = 0; i < fs->narena; i++){
 		a = &fs->arenas[i];
+		a->logbuf[0] = cachepluck();
+		a->logbuf[1] = cachepluck();
 		if(loadlog(a, a->loghd) == -1)
 			sysfatal("load log %B: %r", a->loghd);
 	}