shithub: qk1

ref: 9639189ce55ff3124aa4c5956503d21d4206372c
parent: b172dc86c94c1a28a630998a35fdbaf8b4a7fa8f
author: Sigrid Solveig Haflínudóttir <sigrid@ftrv.se>
date: Sat Dec 2 23:09:12 EST 2023

add preliminary Half-Life maps support

Also emulate an 80-bit FPU when calculating surface extents. de_dust2
looked really bad on amd64 without it.
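
For reference, here is a minimal sketch (not part of the patch; the
name dotadd80 is made up) of what the new dotadd() below computes. On
x86, where long double is typically the x87 80-bit extended format,
the hardware equivalent would be:

	float
	dotadd80(float *a, float *b)
	{
		long double z = b[3];
		int i;

		for(i = 0; i < 3; i++)
			z += (long double)a[i] * (long double)b[i];
		return z;
	}

The SoftFloat version in dotadd.c computes the same thing, but gives
identical results on every architecture, including those where long
double is not 80 bits wide.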

--- a/Makefile
+++ b/Makefile
@@ -23,6 +23,7 @@
 	common.o\
 	console.o\
 	cvar.o\
+	dotadd.o\
 	d_alpha.o\
 	d_edge.o\
 	d_init.o\
@@ -47,6 +48,7 @@
 	model_brush.o\
 	model_bsp.o\
 	model_bsp2.o\
+	model_bsp30.o\
 	model_sprite.o\
 	net_loop.o\
 	net_main.o\
@@ -71,6 +73,7 @@
 	sbar.o\
 	screen.o\
 	snd.o\
+	softfloat.o\
 	span.o\
 	span_alpha.o\
 	sv_main.o\
--- a/bspfile.h
+++ b/bspfile.h
@@ -1,5 +1,6 @@
 #define BSPVERSION 29
 #define BSP2VERSION ('B'|'S'<<8|'P'<<16|'2'<<24)
+#define BSP30VERSION 30
 
 enum {
 	// upper design bounds
--- a/common.c
+++ b/common.c
@@ -7,15 +7,39 @@
 bool		standard_quake = true, rogue, hipnotic;
 
 void
-torgbx(byte *in, pixel_t *out, int n)
+pal3torgbx(byte *in, pixel_t *out, int n, byte *pal)
 {
+	int x;
+
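+	// if the input overlaps the output buffer (e.g. an in-place
+	// conversion), convert back-to-front so the widening writes never
+	// clobber unread input; otherwise convert front-to-back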
 	if(in < (byte*)out || in > (byte*)(out+n) || in+n < (byte*)out){
+		while(n-- > 0){
+			x = (*in++)*3;
+			*out++ = 0xff<<24 | pal[x+0]<<16 | pal[x+1]<<8 | pal[x+2];
+		}
+	}else{
+		while(n-- > 0){
+			x = in[n]*3;
+			out[n] = 0xff<<24 | pal[x+0]<<16 | pal[x+1]<<8 | pal[x+2];
+		}
+	}
+}
+
+void
+paltorgbx(byte *in, pixel_t *out, int n, pixel_t *pal)
+{
+	if(in < (byte*)out || in > (byte*)(out+n) || in+n < (byte*)out){
 		while(n-- > 0)
-			*out++ = q1pal[*in++];
+			*out++ = pal[*in++];
 	}else{
 		while(n-- > 0)
-			out[n] = q1pal[in[n]];
+			out[n] = pal[in[n]];
 	}
+}
+
+void
+torgbx(byte *in, pixel_t *out, int n)
+{
+	paltorgbx(in, out, n, q1pal);
 }
 
 /*
--- /dev/null
+++ b/dotadd.c
@@ -1,0 +1,25 @@
+#include "quakedef.h"
+#include "softfloat.h"
+
+float
+dotadd(float *a, float *b)
+{
+	extFloat80_t x, y, m, z;
+	union {
+		float32_t v;
+		float f;
+	}f;
+	int i;
+
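+	// compute b[3] + a[0]*b[0] + a[1]*b[1] + a[2]*b[2], keeping every
+	// intermediate result in 80-bit extended precision, as an x87 FPU would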
+	f.f = b[3];
+	f32_to_extF80M(f.v, &z);
+	for(i = 0; i < 3; i++){
+		f.f = a[i]; f32_to_extF80M(f.v, &x);
+		f.f = b[i]; f32_to_extF80M(f.v, &y);
+		extF80M_mul(&x, &y, &m);
+		extF80M_add(&z, &m, &z);
+	}
+	f.v = extF80M_to_f32(&z);
+
+	return f.f;
+}
--- a/draw.c
+++ b/draw.c
@@ -11,8 +11,9 @@
 qpic_t *draw_disc;
 
 static rectdesc_t r_rectdesc;
-static pixel_t *draw_chars;				// 8*8 graphic characters
+static pixel_t draw_chars[128*128];				// 8*8 graphic characters
 static qpic_t *draw_backtile;
+Wad *wad_gfx;
 
 //=============================================================================
 /* Support Routines */
@@ -27,33 +28,6 @@
 static cachepic_t menu_cachepics[MAX_CACHED_PICS];
 static int menu_numcachepics;
 
-qpic_t *
-Draw_PicFromWad (char *name)
-{
-	mem_user_t dummy = {0};
-	qpic_t *p, *q = W_GetLumpName (name);
-	int n;
-
-	n = q->width*q->height;
-	memset(&dummy, 0, sizeof(dummy));
-	p = Cache_Alloc(&dummy, sizeof(*q)+n*sizeof(pixel_t));
-	memmove(p, q, sizeof(*q));
-	torgbx((byte*)q->data, p->data, n);
-	return p;
-}
-
-void
-CachedPicConv(cachepic_t *p)
-{
-	qpic_t *q;
-	int n;
-
-	q = Cache_Check(&p->cache);
-	n = q->width*q->height;
-	q = Cache_Realloc(&p->cache, sizeof(*q)+n*sizeof(pixel_t));
-	torgbx((byte*)q->data, q->data, n);
-}
-
 /*
 ================
 Draw_CachePic
@@ -62,8 +36,8 @@
 qpic_t	*Draw_CachePic (char *path)
 {
 	cachepic_t	*pic;
-	int			i;
-	qpic_t		*dat;
+	int			i, n;
+	qpic_t		*q;
 
 	for (pic=menu_cachepics, i=0 ; i<menu_numcachepics ; pic++, i++)
 		if (!strcmp (path, pic->name))
@@ -77,23 +51,30 @@
 		strcpy (pic->name, path);
 	}
 
-	dat = Cache_Check(&pic->cache);
+	q = Cache_Check(&pic->cache);
 
-	if(dat)
-		return dat;
+	if(q)
+		return q;
 
 	// load the pic from disk
-	dat = loadcachelmp(path, &pic->cache);
-	if(dat == nil)
+	q = loadcachelmp(path, &pic->cache);
+	if(q == nil)
 		fatal("Draw_CachePic: %s", lerr());
-	SwapPic(dat);
-	CachedPicConv(pic);
+	q->width = LittleLong(q->width);
+	q->height = LittleLong(q->height);
+	n = q->width*q->height;
+	q = Cache_Realloc(&pic->cache, sizeof(*q)+n*sizeof(pixel_t));
+	torgbx((byte*)q->data, q->data, n);
 
-	return Cache_Check(&pic->cache);
+	return q;
 }
 
+qpic_t *
+Draw_PicFromWad(char *name)
+{
+	return W_ReadQpic(wad_gfx, name, nil);
+}
 
-
 /*
 ===============
 Draw_Init
@@ -101,12 +82,12 @@
 */
 void Draw_Init (void)
 {
-	mem_user_t dummy = {0};
-
-	draw_chars = Cache_Alloc(&dummy, 128*128*sizeof(pixel_t));
-	torgbx(W_GetLumpName("conchars"), draw_chars, 128*128);
-	draw_disc = Draw_PicFromWad("disc");
-	draw_backtile = Draw_PicFromWad("backtile");
+	if(W_ReadPixels(wad_gfx, "conchars", draw_chars, nelem(draw_chars)) < 0)
+		fatal("Draw_Init: %s", lerr());
+	if((draw_disc = Draw_PicFromWad("disc")) == nil)
+		fatal("Draw_Init: %s", lerr());
+	if((draw_backtile = Draw_PicFromWad("backtile")) == nil)
+		fatal("Draw_Init: %s", lerr());
 	r_rectdesc.width = draw_backtile->width;
 	r_rectdesc.height = draw_backtile->height;
 	r_rectdesc.ptexpixels = draw_backtile->data;
--- a/host.c
+++ b/host.c
@@ -84,7 +84,8 @@
 This shuts down both the client and server
 ================
 */
-_Noreturn void Host_Error (char *fmt, ...)
+_Noreturn void
+Host_Error (char *fmt, ...)
 {
 	va_list arg;
 	char s[1024];
@@ -94,8 +95,10 @@
 	vsnprint(s, sizeof s, fmt, arg);
 	va_end(arg);
 
-	if(inerror)
-		fatal("Host_Error: recursively entered: %s", fmt);
+	if(inerror){
+		assert(nil);
+		fatal("Host_Error: recursively entered: %s", s);
+	}
 	inerror = true;
 
 	SCR_EndLoadingPlaque();	// reenable screen updates
@@ -584,7 +587,8 @@
 	Chase_Init ();
 	initfs(paths);
 	Host_InitLocal ();
-	W_LoadWadFile ("gfx.wad");
+	extern Wad *wad_gfx;
+	wad_gfx = W_OpenWad("gfx.wad");
 	Key_Init ();
 	Con_Init ();
 	M_Init ();
--- a/mkfile
+++ b/mkfile
@@ -2,9 +2,10 @@
 
 BIN=/$objtype/bin/games
 TARG=quake
-CFLAGS=$CFLAGS -D__plan9__
+CFLAGS=$CFLAGS -p -D__plan9__
 
 OFILES=\
+	${SOFTFLOAT}\
 	span`{test -f span_$objtype.s && echo -n _$objtype}.$O\
 	span_alpha.$O\
 	dotproduct`{test -f span_$objtype.s && echo -n _$objtype}.$O\
@@ -19,6 +20,7 @@
 	common.$O\
 	console.$O\
 	cvar.$O\
+	dotadd.$O\
 	draw.$O\
 	d_alpha.$O\
 	d_edge.$O\
@@ -44,6 +46,7 @@
 	model_brush.$O\
 	model_bsp.$O\
 	model_bsp2.$O\
+	model_bsp30.$O\
 	model_sprite.$O\
 	nanosec.$O\
 	net_dgrm.$O\
@@ -72,6 +75,7 @@
 	sbar.$O\
 	snd.$O\
 	snd_plan9.$O\
+	softfloat.$O\
 	sv_main.$O\
 	sv_move.$O\
 	sv_phys.$O\
--- a/model.c
+++ b/model.c
@@ -103,7 +103,7 @@
 
 		c = in[1];
 		in += 2;
-		while(c){
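+		// don't let a corrupt run length write past the end of the row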
+		while(c && out - decompressed < row){
 			*out++ = 0;
 			c--;
 		}
--- a/model.h
+++ b/model.h
@@ -309,6 +309,10 @@
 	int	needload;		// bmodels and sprites don't cache normally
 	bool	blend;
 
+	int ver;
+	int numwads;
+	Wad **wads;
+
 	modtype_t	type;
 	int			numframes;
 	synctype_t	synctype;
--- a/model_brush.c
+++ b/model_brush.c
@@ -26,6 +26,11 @@
 int BSP2_LoadMarksurfaces(model_t *mod, byte *in, int sz);
 int BSP2_LoadNodes(model_t *mod, byte *in, int sz);
 
+int BSP30_LoadEntities(model_t *mod, byte *in, int sz);
+int BSP30_LoadLighting(model_t *mod, byte *in, int sz);
+int BSP30_LoadFaces(model_t *mod, byte *in, int sz);
+int BSP30_LoadTextures(model_t *mod, byte *in, int sz);
+
 static float
 RadiusFromBounds(vec3_t mins, vec3_t maxs)
 {
@@ -51,23 +56,24 @@
 	submodel_t *bm;
 	char name[16];
 	int (*loadf[HEADER_LUMPS])(model_t *, byte *, int) = {
+		[LUMP_ENTITIES] = BSP_LoadEntities,
 		[LUMP_VERTEXES] = BSP_LoadVertexes,
-		[LUMP_EDGES] = nil,
+		[LUMP_EDGES] = BSP_LoadEdges,
 		[LUMP_SURFEDGES] = BSP_LoadSurfedges,
 		[LUMP_TEXTURES] = BSP_LoadTextures,
 		[LUMP_LIGHTING] = BSP_LoadLighting,
 		[LUMP_PLANES] = BSP_LoadPlanes,
 		[LUMP_TEXINFO] = BSP_LoadTexinfo,
-		[LUMP_FACES] = nil,
-		[LUMP_MARKSURFACES] = nil,
+		[LUMP_FACES] = BSP_LoadFaces,
+		[LUMP_MARKSURFACES] = BSP_LoadMarksurfaces,
 		[LUMP_VISIBILITY] = BSP_LoadVisibility,
-		[LUMP_LEAFS] = nil,
-		[LUMP_NODES] = nil,
-		[LUMP_CLIPNODES] = nil,
-		[LUMP_ENTITIES] = BSP_LoadEntities,
+		[LUMP_LEAFS] = BSP_LoadLeafs,
+		[LUMP_NODES] = BSP_LoadNodes,
+		[LUMP_CLIPNODES] = BSP_LoadClipnodes,
 		[LUMP_MODELS] = BSP_LoadSubmodels,
 	};
 	static const int order[HEADER_LUMPS] = {
+		LUMP_ENTITIES,
 		LUMP_VERTEXES,
 		LUMP_EDGES,
 		LUMP_SURFEDGES,
@@ -81,7 +87,6 @@
 		LUMP_LEAFS,
 		LUMP_NODES,
 		LUMP_CLIPNODES,
-		LUMP_ENTITIES,
 		LUMP_MODELS,
 	};
 
@@ -88,12 +93,12 @@
 	in = in0;
 	ver = le32(in);
 	if(ver == BSPVERSION){
-		loadf[LUMP_EDGES] = BSP_LoadEdges;
-		loadf[LUMP_FACES] = BSP_LoadFaces;
-		loadf[LUMP_MARKSURFACES] = BSP_LoadMarksurfaces;
-		loadf[LUMP_LEAFS] = BSP_LoadLeafs;
-		loadf[LUMP_NODES] = BSP_LoadNodes;
-		loadf[LUMP_CLIPNODES] = BSP_LoadClipnodes;
+		// all set
+	}else if(ver == BSP30VERSION){
+		loadf[LUMP_ENTITIES] = BSP30_LoadEntities;
+		loadf[LUMP_FACES] = BSP30_LoadFaces;
+		loadf[LUMP_LIGHTING] = BSP30_LoadLighting;
+		loadf[LUMP_TEXTURES] = BSP30_LoadTextures;
 	}else if(ver == BSP2VERSION){
 		loadf[LUMP_EDGES] = BSP2_LoadEdges;
 		loadf[LUMP_FACES] = BSP2_LoadFaces;
@@ -102,7 +107,7 @@
 		loadf[LUMP_NODES] = BSP2_LoadNodes;
 		loadf[LUMP_CLIPNODES] = BSP2_LoadClipnodes;
 	}else{
-		werrstr("unsupported version: %ux", ver);
+		werrstr("unsupported version: %d", ver);
 		goto err;
 	}
 
@@ -112,6 +117,7 @@
 	}
 
 	mod->type = mod_brush;
+	mod->ver = ver;
 
 	for(i = 0; i < nelem(loadf); i++){
 		in = in0+4+2*4*order[i];
--- a/model_bsp.c
+++ b/model_bsp.c
@@ -1,5 +1,7 @@
 #include "quakedef.h"
 
+float dotadd(float *a, float *b);
+
 void
 BSP_SetParent(mnode_t *node, mnode_t *parent)
 {
@@ -31,10 +33,13 @@
 			v = &mod->vertexes[mod->edges[-e].v[1]];
 
 		for(j = 0; j < 2; j++){
-			val = (double)v->position[0] * (double)tex->vecs[j][0] +
-				(double)v->position[1] * (double)tex->vecs[j][1] +
-				(double)v->position[2] * (double)tex->vecs[j][2] +
-				(double)tex->vecs[j][3];
+			// this is... weird.
+			// because everybody built their maps a long time ago, precision
+			// was (most likely) 80 bits. we could just cast to double here,
+			// but double is not 80 bits and stuff would still be broken.
+			// instead we literally run the calculation emulated at 80 bits
+			// using SoftFloat. enjoy. or not.
+			val = dotadd(v->position, tex->vecs[j]);
 			if(val < mins[j])
 				mins[j] = val;
 			if(val > maxs[j])
@@ -297,8 +302,7 @@
 		return -1;
 	}
 	mod->numedges = sz / elsz;
-	// FIXME(sigrid): why +1?
-	mod->edges = out = Hunk_Alloc((mod->numedges+1) * sizeof(*out));
+	mod->edges = out = Hunk_Alloc(mod->numedges * sizeof(*out));
 
 	for(i = 0; i < mod->numedges; i++, out++){
 		out->v[0] = le16u(in);
@@ -385,7 +389,8 @@
 
 		memmove(out->styles, in, MAXLIGHTMAPS);
 		in += MAXLIGHTMAPS;
-		out->samples = (i = le32(in)) < 0 ? nil : mod->lightdata + i;
+		i = le32(in);
+		out->samples = i < 0 ? nil : mod->lightdata + i;
 
 		// set the drawing flags flag
 
@@ -623,8 +628,7 @@
 		return -1;
 	}
 	mod->numplanes = sz / elsz;
-	// FIXME(sigrid): why " * 2"???
-	mod->planes = out = Hunk_Alloc(mod->numplanes * 2 * sizeof(*out));
+	mod->planes = out = Hunk_Alloc(mod->numplanes * sizeof(*out));
 
 	for(i = 0; i < mod->numplanes; i++, out++){
 		bits = 0;
--- /dev/null
+++ b/model_bsp30.c
@@ -1,0 +1,266 @@
+#include "quakedef.h"
+
+int BSP_CalcSurfaceExtents(model_t *mod, msurface_t *s);
+
+int
+BSP30_LoadEntities(model_t *mod, byte *in, int sz)
+{
+	char *p, *s, *e, path[32];
+	Wad *w;
+	int maxw;
+
+	mod->numwads = 0;
+	if(sz == 0){
+		mod->entities = nil;
+		return 0;
+	}
+
+	memcpy(mod->entities = Hunk_Alloc(sz), in, sz);
+	if((s = strstr((char*)mod->entities, "\"wad\"")) == nil ||
+	   (s = strchr(s+5, '"')) == nil ||
+	   (e = strchr(s+1, '"')) == nil)
+		return 0;
+
+	maxw = 4;
+	mod->wads = Hunk_Alloc(maxw * sizeof(*mod->wads));
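+	// the worldspawn "wad" value is a ;-separated list of DOS-style
+	// paths; keep only the part after the last \ and open each wad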
+	for(s = s+1, p = s; s <= e; s++){
+		if(p != s && (*s == ';' || s == e)){
+			snprintf(path, sizeof(path), "%.*s", (int)(s-p), p);
+			if((w = W_OpenWad(path)) != nil){
+				mod->wads = Arr_AllocExtra(mod->wads, &maxw, mod->numwads-maxw);
+				mod->wads[mod->numwads++] = w;
+			}else{
+				Con_Printf("BSP30_LoadEntities: %s\n", lerr());
+				continue;
+			}
+			p = s+1;
+		}else if(*s == '\\')
+			p = s+1;
+	}
+	return 0;
+}
+
+int
+BSP30_LoadLighting(model_t *mod, byte *in, int sz)
+{
+	int i;
+
+	if(sz == 0){
+		mod->lightdata = nil;
+		return 0;
+	}
+
+	// FIXME(sigrid): change when colored lighting support is in
+	sz /= 3;
+	mod->lightdata = Hunk_Alloc(sz);
+	for(i = 0; i < sz; i++, in += 3)
+		mod->lightdata[i] = (in[0] + in[1] + in[2])/3;
+	return 0;
+}
+
+int
+BSP30_LoadFaces(model_t *mod, byte *in, int sz)
+{
+	msurface_t *out;
+	int i, surfnum;
+	static const int elsz = 2+2+4+2+2+MAXLIGHTMAPS+4;
+
+	if(sz % elsz){
+		werrstr("BSP30_LoadFaces: funny lump size");
+		return -1;
+	}
+	mod->numsurfaces = sz / elsz;
+	mod->surfaces = out = Hunk_Alloc(mod->numsurfaces * sizeof(*out));
+
+	for(surfnum = 0; surfnum < mod->numsurfaces; surfnum++, out++){
+		out->plane = mod->planes + le16u(in);
+		out->flags = le16u(in) ? SURF_PLANEBACK : 0;
+		out->firstedge = le32u(in);
+		out->numedges = le16u(in);
+		out->texinfo = mod->texinfo + le16u(in);
+
+		if(BSP_CalcSurfaceExtents(mod, out) != 0)
+			return -1;
+
+		// lighting info
+		memmove(out->styles, in, MAXLIGHTMAPS);
+		in += MAXLIGHTMAPS;
+		i = le32(in);
+		if(i > 0){
+			if(i % 3)
+				Con_Printf("misaligned light samples: %d\n", i);
+			else{
+				// FIXME(sigrid): change when colored lighting support is in
+				out->samples = mod->lightdata + i/3;
+			}
+		}
+
+		// set the drawing flags
+		if(strncmp(out->texinfo->texture->name, "sky", 3) == 0)
+			out->flags |= SURF_DRAWSKY | SURF_DRAWTILED;
+		else if(out->texinfo->texture->name[0] == '!'){	// turbulent
+			out->flags |= SURF_DRAWTURB | SURF_DRAWTILED | SURF_TRANS;
+			for(i = 0; i < 2; i++){
+				out->extents[i] = 16384;
+				out->texturemins[i] = -8192;
+			}
+		}else if(out->texinfo->texture->name[0] == '{')
+			out->flags |= SURF_TRANS | SURF_FENCE;
+	}
+	return 0;
+}
+
+int
+BSP30_LoadTextures(model_t *mod, byte *in, int sz)
+{
+	int off, i, j, pixels, num, max, altmax, w, h, palsz;
+	byte *p, *in0, *x;
+	texture_t *tx, *tx2;
+	texture_t *anims[10];
+	texture_t *altanims[10];
+	static const int elsz = 16+2*4+4*4;
+
+	if(sz < 1){
+		mod->textures = nil;
+		return 0;
+	}
+	if(sz < 4 || (sz % 4) != 0){
+		werrstr("funny lump size");
+		goto err;
+	}
+
+	in0 = in;
+	mod->numtextures = le32(in);
+	if(mod->numtextures*4 > sz-4){
+		werrstr("overflow? %d > %d", mod->numtextures*4, sz-4);
+		goto err;
+	}
+	mod->textures = Hunk_Alloc(mod->numtextures * sizeof(*mod->textures));
+
+	for(i = 0; i < mod->numtextures; i++){
+		off = le32(in);
+		if(off == -1)
+			continue;
+		if(off < 0 || off > sz-elsz){
+			werrstr("bad offset %d (sz %d)", off, sz);
+			goto err;
+		}
+		p = in0+off+16;
+		w = le32(p);
+		h = le32(p);
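+		// the four mip levels take w*h * (1 + 1/4 + 1/16 + 1/64) = w*h*85/64 texels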
+		pixels = w*h*85/64;
+		tx = Hunk_Alloc(sizeof(*tx) + pixels*sizeof(pixel_t));
+		strncpy(tx->name, (char*)in0+off, sizeof(tx->name)-1);
+		tx->name[sizeof(tx->name)-1] = 0;
+		for(j = 0; j < MIPLEVELS; j++)
+			tx->offsets[j] = sizeof(texture_t) + (le32(p) - (16+2*4+4*4))*sizeof(pixel_t);
+		mod->textures[i] = tx;
+		tx->width = w;
+		tx->height = h;
+		if(tx->offsets[0] > 0){
+			// the pixels immediately follow the structures
+			x = p + pixels;
+			palsz = le16(x);
+			if(palsz == 256)
+				pal3torgbx(p, (pixel_t*)(tx+1), pixels, x);
+		}else{
+			// alternative: outside, in a wad
+			for(j = 0; j < mod->numwads; j++){
+				if(W_ReadMipTex(mod->wads[j], tx->name, tx) >= 0)
+					break;
+			}
+			if(j >= mod->numwads)
+				Con_Printf("missing texture: %s\n", tx->name);
+		}
+		if(strncmp(tx->name, "sky", 3) == 0)
+			R_InitSky(tx);
+	}
+
+	// sequence the animations
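+	// textures named +0foo..+9foo are animation frames; +Afoo..+Jfoo
+	// (or lowercase) form the alternate sequence used when triggered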
+	for(i = 0; i < mod->numtextures; i++){
+		tx = mod->textures[i];
+		if(!tx || tx->name[0] != '+')
+			continue;
+		if(tx->anim_next)
+			continue;	// already sequenced
+
+		// find the number of frames in the animation
+		memset(anims, 0, sizeof(anims));
+		memset(altanims, 0, sizeof(altanims));
+
+		max = tx->name[1];
+		if(max >= 'a' && max <= 'z')
+			max -= 'a' - 'A';
+		if(max >= '0' && max <= '9'){
+			max -= '0';
+			altmax = 0;
+			anims[max++] = tx;
+		}else if(max >= 'A' && max <= 'J'){
+			altmax = max - 'A';
+			max = 0;
+			altanims[altmax++] = tx;
+		}else{
+badanim:
+			werrstr("bad animating texture: %s", tx->name);
+			goto err;
+		}
+
+		for(j = i+1; j < mod->numtextures; j++){
+			tx2 = mod->textures[j];
+			if(!tx2 || tx2->name[0] != '+')
+				continue;
+			if(strcmp(tx2->name+2, tx->name+2) != 0)
+				continue;
+
+			num = tx2->name[1];
+			if(num >= 'a' && num <= 'z')
+				num -= 'a' - 'A';
+			if(num >= '0' && num <= '9'){
+				num -= '0';
+				anims[num] = tx2;
+				if(num+1 > max)
+					max = num + 1;
+			}else if(num >= 'A' && num <= 'J'){
+				num = num - 'A';
+				altanims[num] = tx2;
+				if(num+1 > altmax)
+					altmax = num+1;
+			}else{
+				goto badanim;
+			}
+		}
+
+#define	ANIM_CYCLE	2
+		// link them all together
+		for(j = 0; j < max; j++){
+			tx2 = anims[j];
+			if(!tx2){
+badframe:
+				werrstr("missing frame %d of %s", j, tx->name);
+				goto err;
+			}
+			tx2->anim_total = max * ANIM_CYCLE;
+			tx2->anim_min = j * ANIM_CYCLE;
+			tx2->anim_max = (j+1) * ANIM_CYCLE;
+			tx2->anim_next = anims[(j+1) % max];
+			if(altmax)
+				tx2->alternate_anims = altanims[0];
+		}
+		for(j = 0; j < altmax; j++){
+			tx2 = altanims[j];
+			if(!tx2)
+				goto badframe;
+			tx2->anim_total = altmax * ANIM_CYCLE;
+			tx2->anim_min = j * ANIM_CYCLE;
+			tx2->anim_max = (j+1) * ANIM_CYCLE;
+			tx2->anim_next = altanims[(j+1) % altmax];
+			if(max)
+				tx2->alternate_anims = anims[0];
+		}
+	}
+
+	return 0;
+err:
+	werrstr("BSP30_LoadTextures: %s", lerr());
+	return -1;
+}
--- a/quakedef.h
+++ b/quakedef.h
@@ -182,6 +182,8 @@
 #define opaque(p) (((p)>>24) != 0xff)
 
 extern pixel_t q1pal[256];
+void pal3torgbx(byte *in, pixel_t *out, int n, byte *pal);
+void paltorgbx(byte *in, pixel_t *out, int n, pixel_t *pal);
 void torgbx(byte *in, pixel_t *out, int n);
 
 void Host_ClearMemory (void);
--- a/r_surf.c
+++ b/r_surf.c
@@ -278,8 +278,34 @@
 
 //=============================================================================
 
+static pixel_t
+addlight(pixel_t x, int light)
+{
+	int r, g, b, y;
+	r = (x>>16) & 0xff;
+	g = (x>>8)  & 0xff;
+	b = (x>>0)  & 0xff;
+	y = (light & 0xff00) >> 8;
+
+	r = (r * (63-y)+16) >> 5; r = min(r, 255);
+	g = (g * (63-y)+16) >> 5; g = min(g, 255);
+	b = (b * (63-y)+16) >> 5; b = min(b, 255);
+	x = (x & ~0xffffff) | r<<16 | g<<8 | b<<0;
+
 /*
-================
+		t = (255*256 - (int)blocklights[i]) >> (8 - VID_CBITS);
+
+		if (t < (1 << 6))
+			t = (1 << 6);
+
+		blocklights[i] = t;
+*/
+
+	return x;
+}
+
+/*
+================
 R_DrawSurfaceBlock8_mip0
 ================
 */
@@ -306,24 +332,30 @@
 			lightstep = lighttemp >> 4;
 
 			light = lightright;
-
-			prowdest[15] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[15])];
-			prowdest[14] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[14])];
-			prowdest[13] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[13])];
-			prowdest[12] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[12])];
-			prowdest[11] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[11])];
-			prowdest[10] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[10])];
-			prowdest[9] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[9])];
-			prowdest[8] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[8])];
-			prowdest[7] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[7])];
-			prowdest[6] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[6])];
-			prowdest[5] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[5])];
-			prowdest[4] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[4])];
-			prowdest[3] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[3])];
-			prowdest[2] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[2])];
-			prowdest[1] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[1])];
-			prowdest[0] = vid.colormap[(light & 0xFF00) + CIND(psource[0])];
-
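+			// force the truecolor path for now; the colormap path below is dead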
+			if(1 || currententity->model->ver == BSP30VERSION){
+				int j;
+				for(j = 15; j >= 0; j--){
+					prowdest[j] = addlight(psource[j], light);
+					light += lightstep;
+				}
+			}else{
+				prowdest[15] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[15])];
+				prowdest[14] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[14])];
+				prowdest[13] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[13])];
+				prowdest[12] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[12])];
+				prowdest[11] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[11])];
+				prowdest[10] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[10])];
+				prowdest[9] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[9])];
+				prowdest[8] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[8])];
+				prowdest[7] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[7])];
+				prowdest[6] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[6])];
+				prowdest[5] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[5])];
+				prowdest[4] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[4])];
+				prowdest[3] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[3])];
+				prowdest[2] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[2])];
+				prowdest[1] = vid.colormap[((light += lightstep) & 0xFF00) + CIND(psource[1])];
+				prowdest[0] = vid.colormap[(light & 0xFF00) + CIND(psource[0])];
+			}
 			psource += sourcetstep;
 			lightright += lightrightstep;
 			lightleft += lightleftstep;
@@ -365,11 +397,17 @@
 
 			light = lightright;
 
-			for (b=7; b>=0; b--)
-			{
-				pix = psource[b];
-				prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(pix)];
-				light += lightstep;
+			if(currententity->model->ver == BSP30VERSION){
+				for(b = 7; b >= 0; b--){
+					prowdest[b] = addlight(psource[b], light);
+					light += lightstep;
+				}
+			}else{
+				for (b=7; b>=0; b--){
+					pix = psource[b];
+					prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(pix)];
+					light += lightstep;
+				}
 			}
 
 			psource += sourcetstep;
@@ -413,11 +451,17 @@
 
 			light = lightright;
 
-			for (b=3; b>=0; b--)
-			{
-				pix = psource[b];
-				prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(pix)];
-				light += lightstep;
+			if(1 || currententity->model->ver == BSP30VERSION){
+				for(b = 3; b >= 0; b--){
+					prowdest[b] = addlight(psource[b], light);
+					light += lightstep;
+				}
+			}else{
+				for (b=3; b>=0; b--){
+					pix = psource[b];
+					prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(pix)];
+					light += lightstep;
+				}
 			}
 
 			psource += sourcetstep;
@@ -440,7 +484,7 @@
 static void R_DrawSurfaceBlock8_mip3 (void)
 {
 	int v, i, b, lightstep, lighttemp, light, lightleft, lightright;
-	pixel_t pix, *psource, *prowdest;
+	pixel_t *psource, *prowdest;
 
 	psource = pbasesource;
 	prowdest = prowdestbase;
@@ -461,11 +505,16 @@
 
 			light = lightright;
 
-			for (b=1; b>=0; b--)
-			{
-				pix = psource[b];
-				prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(pix)];
-				light += lightstep;
+			if(1 || currententity->model->ver == BSP30VERSION){
+				for (b=1; b>=0; b--){
+					prowdest[b] = addlight(psource[b], light);
+					light += lightstep;
+				}
+			}else{
+				for (b=1; b>=0; b--){
+					prowdest[b] = vid.colormap[(light & 0xFF00) + CIND(psource[b])];
+					light += lightstep;
+				}
 			}
 
 			psource += sourcetstep;
--- /dev/null
+++ b/softfloat.c
@@ -1,0 +1,1648 @@
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3e, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include "quakedef.h"
+#include "softfloat.h"
+
+#ifndef UINT64_C
+#define UINT64_C(x) x##ULL
+#endif
+
+u8int softfloat_roundingMode = softfloat_round_near_even;
+u8int softfloat_detectTininess = init_detectTininess;
+u8int softfloat_exceptionFlags = 0;
+u8int extF80_roundingPrecision = 80;
+
+bool
+extF80M_isSignalingNaN( const extFloat80_t *aPtr )
+{
+    const struct extFloat80M *aSPtr;
+    u64int uiA0;
+
+    aSPtr = (const struct extFloat80M *) aPtr;
+    if ( (aSPtr->signExp & 0x7FFF) != 0x7FFF ) return false;
+    uiA0 = aSPtr->signif;
+    return
+        ! (uiA0 & UINT64_C( 0x4000000000000000 ))
+            && (uiA0 & UINT64_C( 0x3FFFFFFFFFFFFFFF));
+
+}
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into an 80-bit extended
+| floating-point NaN, and stores this NaN at the location pointed to by
+| `zSPtr'.
+*----------------------------------------------------------------------------*/
+void
+softfloat_commonNaNToExtF80M(const struct commonNaN *aPtr, struct extFloat80M *zSPtr )
+{
+
+    zSPtr->signExp = packToExtF80UI64( aPtr->sign, 0x7FFF );
+    zSPtr->signif = UINT64_C( 0xC000000000000000 ) | aPtr->v64>>1;
+
+}
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+u32int
+softfloat_commonNaNToF32UI( const struct commonNaN *aPtr )
+{
+
+    return (u32int) aPtr->sign<<31 | 0x7FC00000 | aPtr->v64>>41;
+
+}
+
+/*----------------------------------------------------------------------------
+| Assuming the 80-bit extended floating-point value pointed to by `aSPtr' is
+| a NaN, converts this NaN to the common NaN form, and stores the resulting
+| common NaN at the location pointed to by `zPtr'.  If the NaN is a signaling
+| NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+void
+softfloat_extF80MToCommonNaN(const struct extFloat80M *aSPtr, struct commonNaN *zPtr )
+{
+
+    if ( extF80M_isSignalingNaN( (const extFloat80_t *) aSPtr ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = signExtF80UI64( aSPtr->signExp );
+    zPtr->v64 = aSPtr->signif<<1;
+    zPtr->v0  = 0;
+
+}
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'.  If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void
+softfloat_f32UIToCommonNaN( u32int uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF32UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>31;
+    zPtr->v64  = (u64int) uiA<<41;
+    zPtr->v0   = 0;
+
+}
+
+/*----------------------------------------------------------------------------
+| Assuming at least one of the two 80-bit extended floating-point values
+| pointed to by `aSPtr' and `bSPtr' is a NaN, stores the combined NaN result
+| at the location pointed to by `zSPtr'.  If either original floating-point
+| value is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+void
+softfloat_propagateNaNExtF80M(
+     const struct extFloat80M *aSPtr,
+     const struct extFloat80M *bSPtr,
+     struct extFloat80M *zSPtr
+ )
+{
+    bool isSigNaNA;
+    const struct extFloat80M *sPtr;
+    bool isSigNaNB;
+    u16int uiB64;
+    u64int uiB0;
+    u16int uiA64;
+    u64int uiA0;
+    u16int uiMagA64, uiMagB64;
+
+    isSigNaNA = extF80M_isSignalingNaN( (const extFloat80_t *) aSPtr );
+    sPtr = aSPtr;
+    if ( ! bSPtr ) {
+        if ( isSigNaNA ) softfloat_raiseFlags( softfloat_flag_invalid );
+        goto copy;
+    }
+    isSigNaNB = extF80M_isSignalingNaN( (const extFloat80_t *) bSPtr );
+    if ( isSigNaNA | isSigNaNB ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) {
+            uiB64 = bSPtr->signExp;
+            if ( isSigNaNB ) goto returnLargerUIMag;
+            uiB0 = bSPtr->signif;
+            if ( isNaNExtF80UI( uiB64, uiB0 ) ) goto copyB;
+            goto copy;
+        } else {
+            uiA64 = aSPtr->signExp;
+            uiA0 = aSPtr->signif;
+            if ( isNaNExtF80UI( uiA64, uiA0 ) ) goto copy;
+            goto copyB;
+        }
+    }
+    uiB64 = bSPtr->signExp;
+ returnLargerUIMag:
+    uiA64 = aSPtr->signExp;
+    uiMagA64 = uiA64 & 0x7FFF;
+    uiMagB64 = uiB64 & 0x7FFF;
+    if ( uiMagA64 < uiMagB64 ) goto copyB;
+    if ( uiMagB64 < uiMagA64 ) goto copy;
+    uiA0 = aSPtr->signif;
+    uiB0 = bSPtr->signif;
+    if ( uiA0 < uiB0 ) goto copyB;
+    if ( uiB0 < uiA0 ) goto copy;
+    if ( uiA64 < uiB64 ) goto copy;
+ copyB:
+    sPtr = bSPtr;
+ copy:
+    zSPtr->signExp = sPtr->signExp;
+    zSPtr->signif = sPtr->signif | UINT64_C( 0xC000000000000000 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Interpreting the unsigned integer formed from concatenating 'uiA64' and
+| 'uiA0' as an 80-bit extended floating-point value, and likewise interpreting
+| the unsigned integer formed from concatenating 'uiB64' and 'uiB0' as another
+| 80-bit extended floating-point value, and assuming at least on of these
+| floating-point values is a NaN, returns the bit pattern of the combined NaN
+| result.  If either original floating-point value is a signaling NaN, the
+| invalid exception is raised.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_propagateNaNExtF80UI(
+     u16int uiA64,
+     u64int uiA0,
+     u16int uiB64,
+     u64int uiB0
+ )
+{
+    bool isSigNaNA, isSigNaNB;
+    u64int uiNonsigA0, uiNonsigB0;
+    u16int uiMagA64, uiMagB64;
+    struct uint128 uiZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    isSigNaNA = softfloat_isSigNaNExtF80UI( uiA64, uiA0 );
+    isSigNaNB = softfloat_isSigNaNExtF80UI( uiB64, uiB0 );
+    /*------------------------------------------------------------------------
+    | Make NaNs non-signaling.
+    *------------------------------------------------------------------------*/
+    uiNonsigA0 = uiA0 | UINT64_C( 0xC000000000000000 );
+    uiNonsigB0 = uiB0 | UINT64_C( 0xC000000000000000 );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( isSigNaNA | isSigNaNB ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) {
+            if ( isSigNaNB ) goto returnLargerMag;
+            if ( isNaNExtF80UI( uiB64, uiB0 ) ) goto returnB;
+            goto returnA;
+        } else {
+            if ( isNaNExtF80UI( uiA64, uiA0 ) ) goto returnA;
+            goto returnB;
+        }
+    }
+ returnLargerMag:
+    uiMagA64 = uiA64 & 0x7FFF;
+    uiMagB64 = uiB64 & 0x7FFF;
+    if ( uiMagA64 < uiMagB64 ) goto returnB;
+    if ( uiMagB64 < uiMagA64 ) goto returnA;
+    if ( uiA0 < uiB0 ) goto returnB;
+    if ( uiB0 < uiA0 ) goto returnA;
+    if ( uiA64 < uiB64 ) goto returnA;
+ returnB:
+    uiZ.v64 = uiB64;
+    uiZ.v0  = uiNonsigB0;
+    return uiZ;
+ returnA:
+    uiZ.v64 = uiA64;
+    uiZ.v0  = uiNonsigA0;
+    return uiZ;
+
+}
+
+/*----------------------------------------------------------------------------
+| Raises the exceptions specified by `flags'.  Floating-point traps can be
+| defined here if desired.  It is currently not possible for such a trap
+| to substitute a result value.  If traps are not implemented, this routine
+| should be simply `softfloat_exceptionFlags |= flags;'.
+*----------------------------------------------------------------------------*/
+void
+softfloat_raiseFlags( u8int flags )
+{
+
+    softfloat_exceptionFlags |= flags;
+
+}
+
+u64int
+softfloat_shortShiftRightJam64( u64int a, u8int dist )
+{
+
+    return a>>dist | ((a & (((u64int) 1<<dist) - 1)) != 0);
+
+}
+
+/*----------------------------------------------------------------------------
+| A constant table that translates an 8-bit unsigned integer (the array index)
+| into the number of leading 0 bits before the most-significant 1 of that
+| integer.  For integer zero (index 0), the corresponding table element is 8.
+*----------------------------------------------------------------------------*/
+static const u8int softfloat_countLeadingZeros8[256] = {
+    8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+u8int
+softfloat_countLeadingZeros64( u64int a )
+{
+    u8int count;
+    u32int a32;
+
+    count = 0;
+    a32 = a>>32;
+    if ( ! a32 ) {
+        count = 32;
+        a32 = a;
+    }
+    /*------------------------------------------------------------------------
+    | From here, result is current count + count leading zeros of `a32'.
+    *------------------------------------------------------------------------*/
+    if ( a32 < 0x10000 ) {
+        count += 16;
+        a32 <<= 16;
+    }
+    if ( a32 < 0x1000000 ) {
+        count += 8;
+        a32 <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a32>>24];
+    return count;
+
+}
+
+int
+softfloat_normExtF80SigM( u64int *sigPtr )
+{
+    u64int sig;
+    s8int shiftDist;
+
+    sig = *sigPtr;
+    shiftDist = softfloat_countLeadingZeros64( sig );
+    *sigPtr = sig<<shiftDist;
+    return -shiftDist;
+
+}
+
+u8int
+softfloat_countLeadingZeros32( u32int a )
+{
+    u8int count;
+
+    count = 0;
+    if ( a < 0x10000 ) {
+        count = 16;
+        a <<= 16;
+    }
+    if ( a < 0x1000000 ) {
+        count += 8;
+        a <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a>>24];
+    return count;
+
+}
+
+struct exp16_sig32
+softfloat_normSubnormalF32Sig( u32int sig )
+{
+    s8int shiftDist;
+    struct exp16_sig32 z;
+
+    shiftDist = softfloat_countLeadingZeros32( sig ) - 8;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+
+}
+
+u32int
+softfloat_shiftRightJam32( u32int a, u16int dist )
+{
+
+    return
+        (dist < 31) ? a>>dist | ((u32int) (a<<(-dist & 31)) != 0) : (a != 0);
+
+}
+
+float32_t
+ softfloat_roundPackToF32( bool sign, s16int exp, u32int sig )
+{
+    u8int roundingMode;
+    bool roundNearEven;
+    u8int roundIncrement, roundBits;
+    bool isTiny;
+    u32int uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x40;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0x7F
+                : 0;
+    }
+    roundBits = sig & 0x7F;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0xFD <= (unsigned int) exp ) {
+        if ( exp < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                (softfloat_detectTininess == softfloat_tininess_beforeRounding)
+                    || (exp < -1) || (sig + roundIncrement < 0x80000000);
+            sig = softfloat_shiftRightJam32( sig, -exp );
+            exp = 0;
+            roundBits = sig & 0x7F;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig + roundIncrement)>>7;
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    sig &= ~(u32int) (! (roundBits ^ 0x40) & roundNearEven);
+    if ( ! sig ) exp = 0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uiZ = packToF32UI( sign, exp, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
+void
+f32_to_extF80M( float32_t a, extFloat80_t *zPtr )
+{
+    struct extFloat80M *zSPtr;
+    union ui32_f32 uA;
+    u32int uiA;
+    bool sign;
+    s16int exp;
+    u32int frac;
+    struct commonNaN commonNaN;
+    u16int uiZ64;
+    u32int uiZ32;
+    struct exp16_sig32 normExpSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    zSPtr = (struct extFloat80M *) zPtr;
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp  = expF32UI( uiA );
+    frac = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0xFF ) {
+        if ( frac ) {
+            softfloat_f32UIToCommonNaN( uiA, &commonNaN );
+            softfloat_commonNaNToExtF80M( &commonNaN, zSPtr );
+            return;
+        }
+        uiZ64 = packToExtF80UI64( sign, 0x7FFF );
+        uiZ32 = 0x80000000;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! exp ) {
+        if ( ! frac ) {
+            uiZ64 = packToExtF80UI64( sign, 0 );
+            uiZ32 = 0;
+            goto uiZ;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( frac );
+        exp = normExpSig.exp;
+        frac = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiZ64 = packToExtF80UI64( sign, exp + 0x3F80 );
+    uiZ32 = 0x80000000 | (u32int) frac<<8;
+ uiZ:
+    zSPtr->signExp = uiZ64;
+    zSPtr->signif = (u64int) uiZ32<<32;
+
+}
+
+float32_t
+extF80M_to_f32( const extFloat80_t *aPtr )
+{
+    const struct extFloat80M *aSPtr;
+    u16int uiA64;
+    bool sign;
+    s32int exp;
+    u64int sig;
+    struct commonNaN commonNaN;
+    u32int uiZ, sig32;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    aSPtr = (const struct extFloat80M *) aPtr;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiA64 = aSPtr->signExp;
+    sign = signExtF80UI64( uiA64 );
+    exp  = expExtF80UI64( uiA64 );
+    sig = aSPtr->signif;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x7FFF ) {
+        if ( sig & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) {
+            softfloat_extF80MToCommonNaN( aSPtr, &commonNaN );
+            uiZ = softfloat_commonNaNToF32UI( &commonNaN );
+        } else {
+            uiZ = packToF32UI( sign, 0xFF, 0 );
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! (sig & UINT64_C( 0x8000000000000000 )) ) {
+        if ( ! sig ) {
+            uiZ = packToF32UI( sign, 0, 0 );
+            goto uiZ;
+        }
+        exp += softfloat_normExtF80SigM( &sig );
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = softfloat_shortShiftRightJam64( sig, 33 );
+    exp -= 0x3F81;
+    if ( sizeof (s16int) < sizeof (s32int) ) {
+        if ( exp < -0x1000 ) exp = -0x1000;
+    }
+    return softfloat_roundPackToF32( sign, exp, sig32 );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
+struct uint64_extra
+ softfloat_shiftRightJam64Extra(
+     u64int a, u64int extra, u32int dist )
+{
+    struct uint64_extra z;
+
+    if ( dist < 64 ) {
+        z.v = a>>dist;
+        z.extra = a<<(-dist & 63);
+    } else {
+        z.v = 0;
+        z.extra = (dist == 64) ? a : (a != 0);
+    }
+    z.extra |= (extra != 0);
+    return z;
+
+}
+
+struct uint128
+ softfloat_shiftRightJam128( u64int a64, u64int a0, u32int dist )
+{
+    u8int u8NegDist;
+    struct uint128 z;
+
+    if ( dist < 64 ) {
+        u8NegDist = -dist;
+        z.v64 = a64>>dist;
+        z.v0 =
+            a64<<(u8NegDist & 63) | a0>>dist
+                | ((u64int) (a0<<(u8NegDist & 63)) != 0);
+    } else {
+        z.v64 = 0;
+        z.v0 =
+            (dist < 127)
+                ? a64>>(dist & 63)
+                      | (((a64 & (((u64int) 1<<(dist & 63)) - 1)) | a0)
+                             != 0)
+                : ((a64 | a0) != 0);
+    }
+    return z;
+
+}
+
+struct uint64_extra
+ softfloat_shortShiftRightJam64Extra(
+     u64int a, u64int extra, u8int dist )
+{
+    struct uint64_extra z;
+
+    z.v = a>>dist;
+    z.extra = a<<(-dist & 63) | (extra != 0);
+    return z;
+
+}
+
+struct exp32_sig64
+softfloat_normSubnormalExtF80Sig( u64int sig )
+{
+    s8int shiftDist;
+    struct exp32_sig64 z;
+
+    shiftDist = softfloat_countLeadingZeros64( sig );
+    z.exp = -shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+
+}
+
+struct uint128
+ softfloat_sub128( u64int a64, u64int a0, u64int b64, u64int b0 )
+{
+    struct uint128 z;
+
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64 - (a0 < b0);
+    return z;
+
+}
+
+u64int
+softfloat_shiftRightJam64( u64int a, u32int dist )
+{
+    return
+        (dist < 63) ? a>>dist | ((u64int) (a<<(-dist & 63)) != 0) : (a != 0);
+
+}
+
+struct uint128
+ softfloat_shortShiftLeft128( u64int a64, u64int a0, u8int dist )
+{
+    struct uint128 z;
+
+    z.v64 = a64<<dist | a0>>(-dist & 63);
+    z.v0 = a0<<dist;
+    return z;
+
+}
+
+extFloat80_t
+ softfloat_normRoundPackToExtF80(
+     bool sign,
+     s32int exp,
+     u64int sig,
+     u64int sigExtra,
+     u8int roundingPrecision
+ )
+{
+    s8int shiftDist;
+    struct uint128 sig128;
+
+    if ( ! sig ) {
+        exp -= 64;
+        sig = sigExtra;
+        sigExtra = 0;
+    }
+    shiftDist = softfloat_countLeadingZeros64( sig );
+    exp -= shiftDist;
+    if ( shiftDist ) {
+        sig128 = softfloat_shortShiftLeft128( sig, sigExtra, shiftDist );
+        sig = sig128.v64;
+        sigExtra = sig128.v0;
+    }
+    return
+        softfloat_roundPackToExtF80(
+            sign, exp, sig, sigExtra, roundingPrecision );
+
+}
+
+extFloat80_t
+ softfloat_roundPackToExtF80(
+     bool sign,
+     s32int exp,
+     u64int sig,
+     u64int sigExtra,
+     u8int roundingPrecision
+ )
+{
+    u8int roundingMode;
+    bool roundNearEven;
+    u64int roundIncrement, roundMask, roundBits;
+    bool isTiny, doIncrement;
+    struct uint64_extra sig64Extra;
+    union { struct extFloat80M s; extFloat80_t f; } uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    if ( roundingPrecision == 80 ) goto precision80;
+    if ( roundingPrecision == 64 ) {
+        roundIncrement = UINT64_C( 0x0000000000000400 );
+        roundMask = UINT64_C( 0x00000000000007FF );
+    } else if ( roundingPrecision == 32 ) {
+        roundIncrement = UINT64_C( 0x0000008000000000 );
+        roundMask = UINT64_C( 0x000000FFFFFFFFFF );
+    } else {
+        goto precision80;
+    }
+    sig |= (sigExtra != 0);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? roundMask
+                : 0;
+    }
+    roundBits = sig & roundMask;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (u32int) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                   (softfloat_detectTininess
+                        == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || (sig <= (u64int) (sig + roundIncrement));
+            sig = softfloat_shiftRightJam64( sig, 1 - exp );
+            roundBits = sig & roundMask;
+            if ( roundBits ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow );
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= roundMask + 1;
+                }
+#endif
+            }
+            sig += roundIncrement;
+            exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+            roundIncrement = roundMask + 1;
+            if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+                roundMask |= roundIncrement;
+            }
+            sig &= ~roundMask;
+            goto packReturn;
+        }
+        if (
+               (0x7FFE < exp)
+            || ((exp == 0x7FFE) && ((u64int) (sig + roundIncrement) < sig))
+        ) {
+            goto overflow;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig = (sig & ~roundMask) | (roundMask + 1);
+            goto packReturn;
+        }
+#endif
+    }
+    sig = (u64int) (sig + roundIncrement);
+    if ( sig < roundIncrement ) {
+        ++exp;
+        sig = UINT64_C( 0x8000000000000000 );
+    }
+    roundIncrement = roundMask + 1;
+    if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+        roundMask |= roundIncrement;
+    }
+    sig &= ~roundMask;
+    goto packReturn;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ precision80:
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (u32int) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                   (softfloat_detectTininess
+                        == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || ! doIncrement
+                || (sig < UINT64_C( 0xFFFFFFFFFFFFFFFF ));
+            sig64Extra =
+                softfloat_shiftRightJam64Extra( sig, sigExtra, 1 - exp );
+            exp = 0;
+            sig = sig64Extra.v;
+            sigExtra = sig64Extra.extra;
+            if ( sigExtra ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow );
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= 1;
+                    goto packReturn;
+                }
+#endif
+            }
+            doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+            if (
+                ! roundNearEven
+                    && (roundingMode != softfloat_round_near_maxMag)
+            ) {
+                doIncrement =
+                    (roundingMode
+                         == (sign ? softfloat_round_min : softfloat_round_max))
+                        && sigExtra;
+            }
+            if ( doIncrement ) {
+                ++sig;
+                sig &=
+                    ~(u64int)
+                         (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                              & roundNearEven);
+                exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+            }
+            goto packReturn;
+        }
+        if (
+               (0x7FFE < exp)
+            || ((exp == 0x7FFE) && (sig == UINT64_C( 0xFFFFFFFFFFFFFFFF ))
+                    && doIncrement)
+        ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            roundMask = 0;
+ overflow:
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            if (
+                   roundNearEven
+                || (roundingMode == softfloat_round_near_maxMag)
+                || (roundingMode
+                        == (sign ? softfloat_round_min : softfloat_round_max))
+            ) {
+                exp = 0x7FFF;
+                sig = UINT64_C( 0x8000000000000000 );
+            } else {
+                exp = 0x7FFE;
+                sig = ~roundMask;
+            }
+            goto packReturn;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) {
+            ++exp;
+            sig = UINT64_C( 0x8000000000000000 );
+        } else {
+            sig &=
+                ~(u64int)
+                     (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                          & roundNearEven);
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uZ.s.signExp = packToExtF80UI64( sign, exp );
+    uZ.s.signif = sig;
+    return uZ.f;
+
+}
+
+extFloat80_t
+ softfloat_subMagsExtF80(
+     u16int uiA64,
+     u64int uiA0,
+     u16int uiB64,
+     u64int uiB0,
+     bool signZ
+ )
+{
+    s32int expA;
+    u64int sigA;
+    s32int expB;
+    u64int sigB;
+    s32int expDiff;
+    u16int uiZ64;
+    u64int uiZ0;
+    s32int expZ;
+    u64int sigExtra;
+    struct uint128 sig128, uiZ;
+    union { struct extFloat80M s; extFloat80_t f; } uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expExtF80UI64( uiA64 );
+    sigA = uiA0;
+    expB = expExtF80UI64( uiB64 );
+    sigB = uiB0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( 0 < expDiff ) goto expABigger;
+    if ( expDiff < 0 ) goto expBBigger;
+    if ( expA == 0x7FFF ) {
+        if ( (sigA | sigB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) {
+            goto propagateNaN;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ64 = defaultNaNExtF80UI64;
+        uiZ0  = defaultNaNExtF80UI0;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA;
+    if ( ! expZ ) expZ = 1;
+    sigExtra = 0;
+    if ( sigB < sigA ) goto aBigger;
+    if ( sigA < sigB ) goto bBigger;
+    uiZ64 =
+        packToExtF80UI64( (softfloat_roundingMode == softfloat_round_min), 0 );
+    uiZ0 = 0;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ expBBigger:
+    if ( expB == 0x7FFF ) {
+        if ( sigB & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) goto propagateNaN;
+        uiZ64 = packToExtF80UI64( signZ ^ 1, 0x7FFF );
+        uiZ0  = UINT64_C( 0x8000000000000000 );
+        goto uiZ;
+    }
+    if ( ! expA ) {
+        ++expDiff;
+        sigExtra = 0;
+        if ( ! expDiff ) goto newlyAlignedBBigger;
+    }
+    sig128 = softfloat_shiftRightJam128( sigA, 0, -expDiff );
+    sigA = sig128.v64;
+    sigExtra = sig128.v0;
+ newlyAlignedBBigger:
+    expZ = expB;
+ bBigger:
+    signZ = ! signZ;
+    sig128 = softfloat_sub128( sigB, 0, sigA, sigExtra );
+    goto normRoundPack;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ expABigger:
+    if ( expA == 0x7FFF ) {
+        if ( sigA & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) goto propagateNaN;
+        uiZ64 = uiA64;
+        uiZ0  = uiA0;
+        goto uiZ;
+    }
+    if ( ! expB ) {
+        --expDiff;
+        sigExtra = 0;
+        if ( ! expDiff ) goto newlyAlignedABigger;
+    }
+    sig128 = softfloat_shiftRightJam128( sigB, 0, expDiff );
+    sigB = sig128.v64;
+    sigExtra = sig128.v0;
+ newlyAlignedABigger:
+    expZ = expA;
+ aBigger:
+    sig128 = softfloat_sub128( sigA, 0, sigB, sigExtra );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ normRoundPack:
+    return
+        softfloat_normRoundPackToExtF80(
+            signZ, expZ, sig128.v64, sig128.v0, extF80_roundingPrecision );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNExtF80UI( uiA64, uiA0, uiB64, uiB0 );
+    uiZ64 = uiZ.v64;
+    uiZ0  = uiZ.v0;
+ uiZ:
+    uZ.s.signExp = uiZ64;
+    uZ.s.signif  = uiZ0;
+    return uZ.f;
+
+}
+
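+/*----------------------------------------------------------------------------
+| Returns the sum of the magnitudes of the two extended-precision values
+| whose raw bits are given by 'uiA64':'uiA0' and 'uiB64':'uiB0', with the
+| sign of the result set to 'signZ'.
+*----------------------------------------------------------------------------*/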
+extFloat80_t
+ softfloat_addMagsExtF80(
+     u16int uiA64,
+     u64int uiA0,
+     u16int uiB64,
+     u64int uiB0,
+     bool signZ
+ )
+{
+    s32int expA;
+    u64int sigA;
+    s32int expB;
+    u64int sigB;
+    s32int expDiff;
+    u16int uiZ64;
+    u64int uiZ0, sigZ, sigZExtra;
+    struct exp32_sig64 normExpSig;
+    s32int expZ;
+    struct uint64_extra sig64Extra;
+    struct uint128 uiZ;
+    union { struct extFloat80M s; extFloat80_t f; } uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expExtF80UI64( uiA64 );
+    sigA = uiA0;
+    expB = expExtF80UI64( uiB64 );
+    sigB = uiB0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        if ( expA == 0x7FFF ) {
+            if ( (sigA | sigB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) {
+                goto propagateNaN;
+            }
+            uiZ64 = uiA64;
+            uiZ0  = uiA0;
+            goto uiZ;
+        }
+        sigZ = sigA + sigB;
+        sigZExtra = 0;
+        if ( ! expA ) {
+            normExpSig = softfloat_normSubnormalExtF80Sig( sigZ );
+            expZ = normExpSig.exp + 1;
+            sigZ = normExpSig.sig;
+            goto roundAndPack;
+        }
+        expZ = expA;
+        goto shiftRight1;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expDiff < 0 ) {
+        if ( expB == 0x7FFF ) {
+            if ( sigB & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) goto propagateNaN;
+            uiZ64 = packToExtF80UI64( signZ, 0x7FFF );
+            uiZ0  = uiB0;
+            goto uiZ;
+        }
+        expZ = expB;
+        if ( ! expA ) {
+            ++expDiff;
+            sigZExtra = 0;
+            if ( ! expDiff ) goto newlyAligned;
+        }
+        sig64Extra = softfloat_shiftRightJam64Extra( sigA, 0, -expDiff );
+        sigA = sig64Extra.v;
+        sigZExtra = sig64Extra.extra;
+    } else {
+        if ( expA == 0x7FFF ) {
+            if ( sigA & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) goto propagateNaN;
+            uiZ64 = uiA64;
+            uiZ0  = uiA0;
+            goto uiZ;
+        }
+        expZ = expA;
+        if ( ! expB ) {
+            --expDiff;
+            sigZExtra = 0;
+            if ( ! expDiff ) goto newlyAligned;
+        }
+        sig64Extra = softfloat_shiftRightJam64Extra( sigB, 0, expDiff );
+        sigB = sig64Extra.v;
+        sigZExtra = sig64Extra.extra;
+    }
+ newlyAligned:
+    sigZ = sigA + sigB;
+    if ( sigZ & UINT64_C( 0x8000000000000000 ) ) goto roundAndPack;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ shiftRight1:
+    sig64Extra = softfloat_shortShiftRightJam64Extra( sigZ, sigZExtra, 1 );
+    sigZ = sig64Extra.v | UINT64_C( 0x8000000000000000 );
+    sigZExtra = sig64Extra.extra;
+    ++expZ;
+ roundAndPack:
+    return
+        softfloat_roundPackToExtF80(
+            signZ, expZ, sigZ, sigZExtra, extF80_roundingPrecision );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNExtF80UI( uiA64, uiA0, uiB64, uiB0 );
+    uiZ64 = uiZ.v64;
+    uiZ0  = uiZ.v0;
+ uiZ:
+    uZ.s.signExp = uiZ64;
+    uZ.s.signif  = uiZ0;
+    return uZ.f;
+
+}
+
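+/*----------------------------------------------------------------------------
+| Adds the 80-bit extended-precision values pointed to by 'aPtr' and 'bPtr'
+| and stores the result at 'zPtr', dispatching on the operand signs to
+| either the magnitude-add or the magnitude-subtract routine above.
+*----------------------------------------------------------------------------*/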
+void
+ extF80M_add(
+     const extFloat80_t *aPtr, const extFloat80_t *bPtr, extFloat80_t *zPtr )
+{
+    const struct extFloat80M *aSPtr, *bSPtr;
+    u16int uiA64;
+    u64int uiA0;
+    bool signA;
+    u16int uiB64;
+    u64int uiB0;
+    bool signB;
+#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2)
+    extFloat80_t
+        (*magsFuncPtr)(
+            u16int, u64int, u16int, u64int, bool );
+#endif
+
+    aSPtr = (const struct extFloat80M *) aPtr;
+    bSPtr = (const struct extFloat80M *) bPtr;
+    uiA64 = aSPtr->signExp;
+    uiA0  = aSPtr->signif;
+    signA = signExtF80UI64( uiA64 );
+    uiB64 = bSPtr->signExp;
+    uiB0  = bSPtr->signif;
+    signB = signExtF80UI64( uiB64 );
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+    if ( signA == signB ) {
+        *zPtr = softfloat_addMagsExtF80( uiA64, uiA0, uiB64, uiB0, signA );
+    } else {
+        *zPtr = softfloat_subMagsExtF80( uiA64, uiA0, uiB64, uiB0, signA );
+    }
+#else
+    magsFuncPtr =
+        (signA == signB) ? softfloat_addMagsExtF80 : softfloat_subMagsExtF80;
+    *zPtr = (*magsFuncPtr)( uiA64, uiA0, uiB64, uiB0, signA );
+#endif
+
+}
+
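+/*----------------------------------------------------------------------------
+| Adds the two N-bit integers pointed to by 'aPtr' and 'bPtr', where
+| N = 'size_words' * 32.  The addition is modulo 2^N, so any carry out is
+| lost.  The N-bit sum is stored at the location pointed to by 'zPtr'.  Each
+| of 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long array of 32-bit
+| elements that concatenate in the platform's normal endian order to form an
+| N-bit integer.
+*----------------------------------------------------------------------------*/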
+void
+ softfloat_addM(
+     u8int size_words,
+     const u32int *aPtr,
+     const u32int *bPtr,
+     u32int *zPtr
+ )
+{
+    unsigned int index, lastIndex;
+    u8int carry;
+    u32int wordA, wordZ;
+
+    index = indexWordLo( size_words );
+    lastIndex = indexWordHi( size_words );
+    carry = 0;
+    for (;;) {
+        wordA = aPtr[index];
+        wordZ = wordA + bPtr[index] + carry;
+        zPtr[index] = wordZ;
+        if ( index == lastIndex ) break;
+        if ( wordZ != wordA ) carry = (wordZ < wordA);
+        index += wordIncr;
+    }
+
+}
+
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_add96M( aPtr, bPtr, zPtr ) softfloat_addM( 3, aPtr, bPtr, zPtr )
+
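+/*----------------------------------------------------------------------------
+| Multiplies 'a' and 'b' with the usual 32x32 -> 64 schoolbook decomposition
+| and stores the full 128-bit product at the location pointed to by 'zPtr'.
+| Argument 'zPtr' points to an array of four 32-bit elements that concatenate
+| in the platform's normal endian order to form a 128-bit integer.
+*----------------------------------------------------------------------------*/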
+void
+softfloat_mul64To128M( u64int a, u64int b, u32int *zPtr )
+{
+    u32int a32, a0, b32, b0;
+    u64int z0, mid1, z64, mid;
+
+    a32 = a>>32;
+    a0 = a;
+    b32 = b>>32;
+    b0 = b;
+    z0 = (u64int) a0 * b0;
+    mid1 = (u64int) a32 * b0;
+    mid = mid1 + (u64int) a0 * b32;
+    z64 = (u64int) a32 * b32;
+    z64 += (u64int) (mid < mid1)<<32 | mid>>32;
+    mid <<= 32;
+    z0 += mid;
+    zPtr[indexWord( 4, 1 )] = z0>>32;
+    zPtr[indexWord( 4, 0 )] = z0;
+    z64 += (z0 < mid);
+    zPtr[indexWord( 4, 3 )] = z64>>32;
+    zPtr[indexWord( 4, 2 )] = z64;
+
+}
+
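+/*----------------------------------------------------------------------------
+| Raises the invalid exception and stores the default NaN at the location
+| pointed to by 'zSPtr'.
+*----------------------------------------------------------------------------*/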
+void
+softfloat_invalidExtF80M( struct extFloat80M *zSPtr )
+{
+
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    zSPtr->signExp = defaultNaNExtF80UI64;
+    zSPtr->signif  = defaultNaNExtF80UI0;
+
+}
+
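+/*----------------------------------------------------------------------------
+| If either of the values pointed to by 'aSPtr' and 'bSPtr' is a NaN, stores
+| the propagated NaN at 'zSPtr' and returns true; otherwise returns false
+| and leaves 'zSPtr' unchanged.
+*----------------------------------------------------------------------------*/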
+bool
+ softfloat_tryPropagateNaNExtF80M(
+     const struct extFloat80M *aSPtr,
+     const struct extFloat80M *bSPtr,
+     struct extFloat80M *zSPtr
+ )
+{
+    u16int ui64;
+    u64int ui0;
+
+    ui64 = aSPtr->signExp;
+    ui0  = aSPtr->signif;
+    if ( isNaNExtF80UI( ui64, ui0 ) ) goto propagateNaN;
+    ui64 = bSPtr->signExp;
+    ui0  = bSPtr->signif;
+    if ( isNaNExtF80UI( ui64, ui0 ) ) goto propagateNaN;
+    return false;
+ propagateNaN:
+    softfloat_propagateNaNExtF80M( aSPtr, bSPtr, zSPtr );
+    return true;
+
+}
+
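+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must be in the range 1 to 31.  If any nonzero bits are shifted off, they
+| are "jammed" into the least-significant bit of the shifted value.  The
+| result is stored at the location pointed to by 'zPtr'.
+*----------------------------------------------------------------------------*/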
+void
+ softfloat_shortShiftRightJamM(
+     u8int size_words,
+     const u32int *aPtr,
+     u8int dist,
+     u32int *zPtr
+ )
+{
+    u8int uNegDist;
+    unsigned int index, lastIndex;
+    u32int partWordZ, wordA;
+
+    uNegDist = -dist;
+    index = indexWordLo( size_words );
+    lastIndex = indexWordHi( size_words );
+    wordA = aPtr[index];
+    partWordZ = wordA>>dist;
+    if ( partWordZ<<dist != wordA ) partWordZ |= 1;
+    while ( index != lastIndex ) {
+        wordA = aPtr[index + wordIncr];
+        zPtr[index] = wordA<<(uNegDist & 31) | partWordZ;
+        index += wordIncr;
+        partWordZ = wordA>>dist;
+    }
+    zPtr[index] = partWordZ;
+
+}
+
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftRightJamM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftRightJam96M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 3, aPtr, dist, zPtr )
+
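+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32 and 'dist' must not
+| be zero.  Any nonzero bits shifted off are "jammed" into the least-
+| significant bit of the result stored at 'zPtr'.  The value of 'dist' can
+| be arbitrarily large; if 'dist' is N or more, the stored result is 0 or 1
+| according to whether the input is zero or nonzero.
+*----------------------------------------------------------------------------*/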
+void
+ softfloat_shiftRightJamM(
+     u8int size_words,
+     const u32int *aPtr,
+     u32int dist,
+     u32int *zPtr
+ )
+{
+    u32int wordJam, wordDist, *ptr;
+    u8int i, innerDist;
+
+    wordJam = 0;
+    wordDist = dist>>5;
+    ptr = nil;
+    if ( wordDist ) {
+        if ( size_words < wordDist ) wordDist = size_words;
+        ptr = (u32int *) (aPtr + indexMultiwordLo( size_words, wordDist ));
+        i = wordDist;
+        do {
+            wordJam = *ptr++;
+            if ( wordJam ) break;
+            --i;
+        } while ( i );
+        ptr = zPtr;
+    }
+    if ( wordDist < size_words ) {
+        aPtr += indexMultiwordHiBut( size_words, wordDist );
+        innerDist = dist & 31;
+        if ( innerDist ) {
+            softfloat_shortShiftRightJamM(
+                size_words - wordDist,
+                aPtr,
+                innerDist,
+                zPtr + indexMultiwordLoBut( size_words, wordDist )
+            );
+            if ( ! wordDist ) goto wordJam;
+        } else {
+            aPtr += indexWordLo( size_words - wordDist );
+            ptr = zPtr + indexWordLo( size_words );
+            for ( i = size_words - wordDist; i; --i ) {
+                *ptr = *aPtr;
+                aPtr += wordIncr;
+                ptr += wordIncr;
+            }
+        }
+        ptr = zPtr + indexMultiwordHi( size_words, wordDist );
+    }
+    do {
+        *ptr++ = 0;
+        --wordDist;
+    } while ( wordDist );
+ wordJam:
+    if ( wordJam ) zPtr[indexWordLo( size_words )] |= 1;
+
+}
+
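+/*----------------------------------------------------------------------------
+| Rounds the value formed by 'sign', 'exp', and the 96-bit significand
+| pointed to by 'extSigPtr' to the number of significand bits given by
+| 'roundingPrecision' (32, 64, or 80), according to the current rounding
+| mode, and packs the result into the 80-bit extended format at 'zSPtr',
+| handling underflow and overflow as required.
+*----------------------------------------------------------------------------*/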
+void
+ softfloat_roundPackMToExtF80M(
+     bool sign,
+     s32int exp,
+     u32int *extSigPtr,
+     u8int roundingPrecision,
+     struct extFloat80M *zSPtr
+ )
+{
+    u8int roundingMode;
+    bool roundNearEven;
+    u64int sig, roundIncrement, roundMask, roundBits;
+    bool isTiny;
+    u32int sigExtra;
+    bool doIncrement;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    sig =
+        (u64int) extSigPtr[indexWord( 3, 2 )]<<32
+            | extSigPtr[indexWord( 3, 1 )];
+    if ( roundingPrecision == 80 ) goto precision80;
+    if ( roundingPrecision == 64 ) {
+        roundIncrement = UINT64_C( 0x0000000000000400 );
+        roundMask = UINT64_C( 0x00000000000007FF );
+    } else if ( roundingPrecision == 32 ) {
+        roundIncrement = UINT64_C( 0x0000008000000000 );
+        roundMask = UINT64_C( 0x000000FFFFFFFFFF );
+    } else {
+        goto precision80;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( extSigPtr[indexWordLo( 3 )] ) sig |= 1;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? roundMask
+                : 0;
+    }
+    roundBits = sig & roundMask;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (u32int) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                   (softfloat_detectTininess
+                        == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || (sig <= (u64int) (sig + roundIncrement));
+            sig = softfloat_shiftRightJam64( sig, 1 - exp );
+            roundBits = sig & roundMask;
+            if ( roundBits ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow );
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= roundMask + 1;
+                }
+#endif
+            }
+            sig += roundIncrement;
+            exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+            roundIncrement = roundMask + 1;
+            if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+                roundMask |= roundIncrement;
+            }
+            sig &= ~roundMask;
+            goto packReturn;
+        }
+        if (
+               (0x7FFE < exp)
+            || ((exp == 0x7FFE) && ((u64int) (sig + roundIncrement) < sig))
+        ) {
+            goto overflow;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig = (sig & ~roundMask) | (roundMask + 1);
+            goto packReturn;
+        }
+#endif
+    }
+    sig += roundIncrement;
+    if ( sig < roundIncrement ) {
+        ++exp;
+        sig = UINT64_C( 0x8000000000000000 );
+    }
+    roundIncrement = roundMask + 1;
+    if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+        roundMask |= roundIncrement;
+    }
+    sig &= ~roundMask;
+    goto packReturn;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ precision80:
+    sigExtra = extSigPtr[indexWordLo( 3 )];
+    doIncrement = (0x80000000 <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (u32int) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                   (softfloat_detectTininess
+                        == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || ! doIncrement
+                || (sig < UINT64_C( 0xFFFFFFFFFFFFFFFF ));
+            softfloat_shiftRightJam96M( extSigPtr, 1 - exp, extSigPtr );
+            exp = 0;
+            sig =
+                (u64int) extSigPtr[indexWord( 3, 2 )]<<32
+                    | extSigPtr[indexWord( 3, 1 )];
+            sigExtra = extSigPtr[indexWordLo( 3 )];
+            if ( sigExtra ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow );
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= 1;
+                    goto packReturn;
+                }
+#endif
+            }
+            doIncrement = (0x80000000 <= sigExtra);
+            if (
+                ! roundNearEven
+                    && (roundingMode != softfloat_round_near_maxMag)
+            ) {
+                doIncrement =
+                    (roundingMode
+                         == (sign ? softfloat_round_min : softfloat_round_max))
+                        && sigExtra;
+            }
+            if ( doIncrement ) {
+                ++sig;
+                sig &= ~(u64int) (! (sigExtra & 0x7FFFFFFF) & roundNearEven);
+                exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+            }
+            goto packReturn;
+        }
+        if (
+               (0x7FFE < exp)
+            || ((exp == 0x7FFE) && (sig == UINT64_C( 0xFFFFFFFFFFFFFFFF ))
+                    && doIncrement)
+        ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            roundMask = 0;
+ overflow:
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            if (
+                   roundNearEven
+                || (roundingMode == softfloat_round_near_maxMag)
+                || (roundingMode
+                        == (sign ? softfloat_round_min : softfloat_round_max))
+            ) {
+                exp = 0x7FFF;
+                sig = UINT64_C( 0x8000000000000000 );
+            } else {
+                exp = 0x7FFE;
+                sig = ~roundMask;
+            }
+            goto packReturn;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) {
+            ++exp;
+            sig = UINT64_C( 0x8000000000000000 );
+        } else {
+            sig &= ~(u64int) (! (sigExtra & 0x7FFFFFFF) & roundNearEven);
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    zSPtr->signExp = packToExtF80UI64( sign, exp );
+    zSPtr->signif = sig;
+
+}
+
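+/*----------------------------------------------------------------------------
+| Multiplies the 80-bit extended-precision values pointed to by 'aPtr' and
+| 'bPtr' and stores the result at 'zPtr'.
+*----------------------------------------------------------------------------*/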
+void
+ extF80M_mul(
+     const extFloat80_t *aPtr, const extFloat80_t *bPtr, extFloat80_t *zPtr )
+{
+    const struct extFloat80M *aSPtr, *bSPtr;
+    struct extFloat80M *zSPtr;
+    u16int uiA64;
+    s32int expA;
+    u16int uiB64;
+    s32int expB;
+    bool signZ;
+    u16int uiZ64;
+    u64int uiZ0, sigA, sigB;
+    s32int expZ;
+    u32int sigProd[4], *extSigZPtr;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    aSPtr = (const struct extFloat80M *) aPtr;
+    bSPtr = (const struct extFloat80M *) bPtr;
+    zSPtr = (struct extFloat80M *) zPtr;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiA64 = aSPtr->signExp;
+    expA = expExtF80UI64( uiA64 );
+    uiB64 = bSPtr->signExp;
+    expB = expExtF80UI64( uiB64 );
+    signZ = signExtF80UI64( uiA64 ) ^ signExtF80UI64( uiB64 );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( (expA == 0x7FFF) || (expB == 0x7FFF) ) {
+        if ( softfloat_tryPropagateNaNExtF80M( aSPtr, bSPtr, zSPtr ) ) return;
+        if (
+               (! aSPtr->signif && (expA != 0x7FFF))
+            || (! bSPtr->signif && (expB != 0x7FFF))
+        ) {
+            softfloat_invalidExtF80M( zSPtr );
+            return;
+        }
+        uiZ64 = packToExtF80UI64( signZ, 0x7FFF );
+        uiZ0  = UINT64_C( 0x8000000000000000 );
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) expA = 1;
+    sigA = aSPtr->signif;
+    if ( ! (sigA & UINT64_C( 0x8000000000000000 )) ) {
+        if ( ! sigA ) goto zero;
+        expA += softfloat_normExtF80SigM( &sigA );
+    }
+    if ( ! expB ) expB = 1;
+    sigB = bSPtr->signif;
+    if ( ! (sigB & UINT64_C( 0x8000000000000000 )) ) {
+        if ( ! sigB ) goto zero;
+        expB += softfloat_normExtF80SigM( &sigB );
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA + expB - 0x3FFE;
+    softfloat_mul64To128M( sigA, sigB, sigProd );
+    if ( sigProd[indexWordLo( 4 )] ) sigProd[indexWord( 4, 1 )] |= 1;
+    extSigZPtr = &sigProd[indexMultiwordHi( 4, 3 )];
+    if ( sigProd[indexWordHi( 4 )] < 0x80000000 ) {
+        --expZ;
+        softfloat_add96M( extSigZPtr, extSigZPtr, extSigZPtr );
+    }
+    softfloat_roundPackMToExtF80M(
+        signZ, expZ, extSigZPtr, extF80_roundingPrecision, zSPtr );
+    return;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ64 = packToExtF80UI64( signZ, 0 );
+    uiZ0  = 0;
+ uiZ:
+    zSPtr->signExp = uiZ64;
+    zSPtr->signif  = uiZ0;
+
+}
--- /dev/null
+++ b/softfloat.h
@@ -1,0 +1,2118 @@
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3e, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#pragma once
+
+#define SOFTFLOAT_FAST_INT64
+#define LITTLEENDIAN
+#define INLINE_LEVEL 0
+#define SOFTFLOAT_FAST_DIV64TO32
+#define SOFTFLOAT_FAST_DIV32TO16
+#define SOFTFLOAT_ROUND_ODD
+#define INLINE inline
+
+#ifdef SOFTFLOAT_FAST_INT64
+
+#ifdef LITTLEENDIAN
+struct uint128 { u64int v0, v64; };
+struct uint64_extra { u64int extra, v; };
+struct uint128_extra { u64int extra; struct uint128 v; };
+#else
+struct uint128 { u64int v64, v0; };
+struct uint64_extra { u64int v, extra; };
+struct uint128_extra { struct uint128 v; u64int extra; };
+#endif
+
+#endif
+
+/*----------------------------------------------------------------------------
+| These macros are used to isolate the differences in word order between big-
+| endian and little-endian platforms.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+#define wordIncr 1
+#define indexWord( total, n ) (n)
+#define indexWordHi( total ) ((total) - 1)
+#define indexWordLo( total ) 0
+#define indexMultiword( total, m, n ) (n)
+#define indexMultiwordHi( total, n ) ((total) - (n))
+#define indexMultiwordLo( total, n ) 0
+#define indexMultiwordHiBut( total, n ) (n)
+#define indexMultiwordLoBut( total, n ) 0
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }
+#else
+#define wordIncr -1
+#define indexWord( total, n ) ((total) - 1 - (n))
+#define indexWordHi( total ) 0
+#define indexWordLo( total ) ((total) - 1)
+#define indexMultiword( total, m, n ) ((total) - 1 - (m))
+#define indexMultiwordHi( total, n ) 0
+#define indexMultiwordLo( total, n ) ((total) - (n))
+#define indexMultiwordHiBut( total, n ) 0
+#define indexMultiwordLoBut( total, n ) (n)
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }
+#endif
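+/*----------------------------------------------------------------------------
+| For example, with LITTLEENDIAN defined as above, the least-significant
+| 32-bit word of a 4-word (128-bit) integer is element indexWord( 4, 0 ) = 0
+| and the most-significant word is element indexWordHi( 4 ) = 3; on a big-
+| endian platform the same expressions select elements 3 and 0, respectively.
+*----------------------------------------------------------------------------*/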
+
+#ifndef softfloat_shortShiftRightJam64
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must be in
+| the range 1 to 63.  If any nonzero bits are shifted off, they are "jammed"
+| into the least-significant bit of the shifted value by setting the least-
+| significant bit to 1.  This shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+u64int softfloat_shortShiftRightJam64( u64int a, u8int dist )
+    { return a>>dist | ((a & (((u64int) 1<<dist) - 1)) != 0); }
+#else
+u64int softfloat_shortShiftRightJam64( u64int a, u8int dist );
+#endif
+#endif
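+/*----------------------------------------------------------------------------
+| Illustrative example (not part of the original SoftFloat sources):
+|   softfloat_shortShiftRightJam64( 0x12, 2 ) == 0x5
+| The two bits shifted off are '10' (nonzero), so the least-significant bit
+| of the shifted value 0x4 is jammed to 1, recording that the shift was
+| inexact for later rounding.
+*----------------------------------------------------------------------------*/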
+
+#ifndef softfloat_shiftRightJam32
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero.  If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1.  This shifted-and-jammed value is returned.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist' is
+| greater than 32, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE u32int softfloat_shiftRightJam32( u32int a, u16int dist )
+{
+    return
+        (dist < 31) ? a>>dist | ((u32int) (a<<(-dist & 31)) != 0) : (a != 0);
+}
+#else
+u32int softfloat_shiftRightJam32( u32int a, u16int dist );
+#endif
+#endif
+
+#ifndef softfloat_shiftRightJam64
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero.  If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1.  This shifted-and-jammed value is returned.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist' is
+| greater than 64, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE u64int softfloat_shiftRightJam64( u64int a, u32int dist )
+{
+    return
+        (dist < 63) ? a>>dist | ((u64int) (a<<(-dist & 63)) != 0) : (a != 0);
+}
+#else
+u64int softfloat_shiftRightJam64( u64int a, u32int dist );
+#endif
+#endif
+
+#ifndef softfloat_countLeadingZeros16
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'.  If 'a' is zero, 16 is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE u8int softfloat_countLeadingZeros16( u16int a )
+{
+    u8int count = 8;
+    if ( 0x100 <= a ) {
+        count = 0;
+        a >>= 8;
+    }
+    count += softfloat_countLeadingZeros8[a];
+    return count;
+}
+#else
+u8int softfloat_countLeadingZeros16( u16int a );
+#endif
+#endif
+
+#ifndef softfloat_countLeadingZeros32
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'.  If 'a' is zero, 32 is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE u8int softfloat_countLeadingZeros32( u32int a )
+{
+    u8int count = 0;
+    if ( a < 0x10000 ) {
+        count = 16;
+        a <<= 16;
+    }
+    if ( a < 0x1000000 ) {
+        count += 8;
+        a <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a>>24];
+    return count;
+}
+#else
+u8int softfloat_countLeadingZeros32( u32int a );
+#endif
+#endif
+
+#ifndef softfloat_countLeadingZeros64
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'.  If 'a' is zero, 64 is returned.
+*----------------------------------------------------------------------------*/
+u8int softfloat_countLeadingZeros64( u64int a );
+#endif
+
+extern const u16int softfloat_approxRecip_1k0s[16];
+extern const u16int softfloat_approxRecip_1k1s[16];
+
+#ifndef softfloat_approxRecip32_1
+/*----------------------------------------------------------------------------
+| Returns an approximation to the reciprocal of the number represented by 'a',
+| where 'a' is interpreted as an unsigned fixed-point number with one integer
+| bit and 31 fraction bits.  The 'a' input must be "normalized", meaning that
+| its most-significant bit (bit 31) must be 1.  Thus, if A is the value of
+| the fixed-point interpretation of 'a', then 1 <= A < 2.  The returned value
+| is interpreted as a pure unsigned fraction, having no integer bits and 32
+| fraction bits.  The approximation returned is never greater than the true
+| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp
+| (units in the last place).
+*----------------------------------------------------------------------------*/
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+#define softfloat_approxRecip32_1( a ) ((u32int) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (u32int) (a)))
+#else
+u32int softfloat_approxRecip32_1( u32int a );
+#endif
+#endif
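+/*----------------------------------------------------------------------------
+| Illustrative example (not part of the original SoftFloat sources):  with
+| 'SOFTFLOAT_FAST_DIV64TO32' defined, a = 0x80000000 (A = 1.0) gives
+| 0x7FFFFFFFFFFFFFFF / 0x80000000 = 0xFFFFFFFF, the fraction 1 - 2^-32,
+| which is just below the true reciprocal 1/A = 1, as guaranteed above.
+*----------------------------------------------------------------------------*/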
+
+extern const u16int softfloat_approxRecipSqrt_1k0s[16];
+extern const u16int softfloat_approxRecipSqrt_1k1s[16];
+
+#ifndef softfloat_approxRecipSqrt32_1
+/*----------------------------------------------------------------------------
+| Returns an approximation to the reciprocal of the square root of the number
+| represented by 'a', where 'a' is interpreted as an unsigned fixed-point
+| number either with one integer bit and 31 fraction bits or with two integer
+| bits and 30 fraction bits.  The format of 'a' is determined by 'oddExpA',
+| which must be either 0 or 1.  If 'oddExpA' is 1, 'a' is interpreted as
+| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having
+| two integer bits.  The 'a' input must be "normalized", meaning that its
+| most-significant bit (bit 31) must be 1.  Thus, if A is the value of the
+| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA'
+| is 1, and 2 <= A < 4 when 'oddExpA' is 0.
+|   The returned value is interpreted as a pure unsigned fraction, having
+| no integer bits and 32 fraction bits.  The approximation returned is never
+| greater than the true reciprocal 1/sqrt(A), and it differs from the true
+| reciprocal by at most 2.06 ulp (units in the last place).  The approximation
+| returned is also always within the range 0.5 to 1; thus, the most-
+| significant bit of the result is always set.
+*----------------------------------------------------------------------------*/
+u32int softfloat_approxRecipSqrt32_1( unsigned int oddExpA, u32int a );
+#endif
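+/*----------------------------------------------------------------------------
+| Illustrative example (not part of the original SoftFloat sources):  with
+| 'oddExpA' = 1 and a = 0x80000000 (A = 1.0), the true reciprocal square
+| root is exactly 1, so the returned fraction is at most 2.06 ulp below it,
+| i.e. close to 0xFFFFFFFF.
+*----------------------------------------------------------------------------*/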
+
+#ifdef SOFTFLOAT_FAST_INT64
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is
+| defined.
+*----------------------------------------------------------------------------*/
+
+#ifndef softfloat_eq128
+/*----------------------------------------------------------------------------
+| Returns true if the 128-bit unsigned integer formed by concatenating 'a64'
+| and 'a0' is equal to the 128-bit unsigned integer formed by concatenating
+| 'b64' and 'b0'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL)
+INLINE
+bool softfloat_eq128( u64int a64, u64int a0, u64int b64, u64int b0 )
+    { return (a64 == b64) && (a0 == b0); }
+#else
+bool softfloat_eq128( u64int a64, u64int a0, u64int b64, u64int b0 );
+#endif
+#endif
+
+#ifndef softfloat_le128
+/*----------------------------------------------------------------------------
+| Returns true if the 128-bit unsigned integer formed by concatenating 'a64'
+| and 'a0' is less than or equal to the 128-bit unsigned integer formed by
+| concatenating 'b64' and 'b0'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+bool softfloat_le128( u64int a64, u64int a0, u64int b64, u64int b0 )
+    { return (a64 < b64) || ((a64 == b64) && (a0 <= b0)); }
+#else
+bool softfloat_le128( u64int a64, u64int a0, u64int b64, u64int b0 );
+#endif
+#endif
+
+#ifndef softfloat_lt128
+/*----------------------------------------------------------------------------
+| Returns true if the 128-bit unsigned integer formed by concatenating 'a64'
+| and 'a0' is less than the 128-bit unsigned integer formed by concatenating
+| 'b64' and 'b0'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+bool softfloat_lt128( u64int a64, u64int a0, u64int b64, u64int b0 )
+    { return (a64 < b64) || ((a64 == b64) && (a0 < b0)); }
+#else
+bool softfloat_lt128( u64int a64, u64int a0, u64int b64, u64int b0 );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftLeft128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the
+| number of bits given in 'dist', which must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftLeft128( u64int a64, u64int a0, u8int dist )
+{
+    struct uint128 z;
+    z.v64 = a64<<dist | a0>>(-dist & 63);
+    z.v0 = a0<<dist;
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftLeft128( u64int a64, u64int a0, u8int dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRight128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftRight128( u64int a64, u64int a0, u8int dist )
+{
+    struct uint128 z;
+    z.v64 = a64>>dist;
+    z.v0 = a64<<(-dist & 63) | a0>>dist;
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftRight128( u64int a64, u64int a0, u8int dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam64Extra
+/*----------------------------------------------------------------------------
+| This function is the same as 'softfloat_shiftRightJam64Extra' (below),
+| except that 'dist' must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint64_extra
+ softfloat_shortShiftRightJam64Extra(
+     u64int a, u64int extra, u8int dist )
+{
+    struct uint64_extra z;
+    z.v = a>>dist;
+    z.extra = a<<(-dist & 63) | (extra != 0);
+    return z;
+}
+#else
+struct uint64_extra
+ softfloat_shortShiftRightJam64Extra(
+     u64int a, u64int extra, u8int dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must be in the range 1 to 63.  If any
+| nonzero bits are shifted off, they are "jammed" into the least-significant
+| bit of the shifted value by setting the least-significant bit to 1.  This
+| shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftRightJam128(
+     u64int a64, u64int a0, u8int dist )
+{
+    u8int negDist = -dist;
+    struct uint128 z;
+    z.v64 = a64>>dist;
+    z.v0 =
+        a64<<(negDist & 63) | a0>>dist
+            | ((u64int) (a0<<(negDist & 63)) != 0);
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftRightJam128(
+     u64int a64, u64int a0, u8int dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam128Extra
+/*----------------------------------------------------------------------------
+| This function is the same as 'softfloat_shiftRightJam128Extra' (below),
+| except that 'dist' must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE
+struct uint128_extra
+ softfloat_shortShiftRightJam128Extra(
+     u64int a64, u64int a0, u64int extra, u8int dist )
+{
+    u8int negDist = -dist;
+    struct uint128_extra z;
+    z.v.v64 = a64>>dist;
+    z.v.v0 = a64<<(negDist & 63) | a0>>dist;
+    z.extra = a0<<(negDist & 63) | (extra != 0);
+    return z;
+}
+#else
+struct uint128_extra
+ softfloat_shortShiftRightJam128Extra(
+     u64int a64, u64int a0, u64int extra, u8int dist );
+#endif
+#endif
+
+#ifndef softfloat_shiftRightJam64Extra
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64
+| _plus_ the number of bits given in 'dist', which must not be zero.  This
+| shifted value is at most 64 nonzero bits and is returned in the 'v' field
+| of the 'struct uint64_extra' result.  The 64-bit 'extra' field of the result
+| contains a value formed as follows from the bits that were shifted off:  The
+| _last_ bit shifted off is the most-significant bit of the 'extra' field, and
+| the other 63 bits of the 'extra' field are all zero if and only if _all_but_
+| _the_last_ bits shifted off were all zero.
+|   (This function makes more sense if 'a' and 'extra' are considered to form
+| an unsigned fixed-point number with binary point between 'a' and 'extra'.
+| This fixed-point value is shifted right by the number of bits given in
+| 'dist', and the integer part of this shifted value is returned in the 'v'
+| field of the result.  The fractional part of the shifted value is modified
+| as described above and returned in the 'extra' field of the result.)
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL)
+INLINE
+struct uint64_extra
+ softfloat_shiftRightJam64Extra(
+     u64int a, u64int extra, u32int dist )
+{
+    struct uint64_extra z;
+    if ( dist < 64 ) {
+        z.v = a>>dist;
+        z.extra = a<<(-dist & 63);
+    } else {
+        z.v = 0;
+        z.extra = (dist == 64) ? a : (a != 0);
+    }
+    z.extra |= (extra != 0);
+    return z;
+}
+#else
+struct uint64_extra
+ softfloat_shiftRightJam64Extra(
+     u64int a, u64int extra, u32int dist );
+#endif
+#endif
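+/*----------------------------------------------------------------------------
+| Illustrative example (not part of the original SoftFloat sources):
+|   softfloat_shiftRightJam64Extra( 1, 0, 1 )
+| returns v = 0 and extra = 0x8000000000000000:  the fixed-point value 1.0
+| shifted right by one place is 0.5, so the single bit shifted off becomes
+| the most-significant bit of the 'extra' fraction.
+*----------------------------------------------------------------------------*/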
+
+#ifndef softfloat_shiftRightJam128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must not be zero.  If any nonzero bits
+| are shifted off, they are "jammed" into the least-significant bit of the
+| shifted value by setting the least-significant bit to 1.  This shifted-and-
+| jammed value is returned.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist' is
+| greater than 128, the result will be either 0 or 1, depending on whether the
+| original 128 bits are all zeros.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_shiftRightJam128( u64int a64, u64int a0, u32int dist );
+#endif
+
+#ifndef softfloat_shiftRightJam128Extra
+/*----------------------------------------------------------------------------
+| Shifts the 192 bits formed by concatenating 'a64', 'a0', and 'extra' right
+| by 64 _plus_ the number of bits given in 'dist', which must not be zero.
+| This shifted value is at most 128 nonzero bits and is returned in the 'v'
+| field of the 'struct uint128_extra' result.  The 64-bit 'extra' field of the
+| result contains a value formed as follows from the bits that were shifted
+| off:  The _last_ bit shifted off is the most-significant bit of the 'extra'
+| field, and the other 63 bits of the 'extra' field are all zero if and only
+| if _all_but_the_last_ bits shifted off were all zero.
+|   (This function makes more sense if 'a64', 'a0', and 'extra' are considered
+| to form an unsigned fixed-point number with binary point between 'a0' and
+| 'extra'.  This fixed-point value is shifted right by the number of bits
+| given in 'dist', and the integer part of this shifted value is returned
+| in the 'v' field of the result.  The fractional part of the shifted value
+| is modified as described above and returned in the 'extra' field of the
+| result.)
+*----------------------------------------------------------------------------*/
+struct uint128_extra
+ softfloat_shiftRightJam128Extra(
+     u64int a64, u64int a0, u64int extra, u32int dist );
+#endif
+
+#ifndef softfloat_shiftRightJam256M
+/*----------------------------------------------------------------------------
+| Shifts the 256-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', which must not be zero.  If any nonzero bits are
+| shifted off, they are "jammed" into the least-significant bit of the shifted
+| value by setting the least-significant bit to 1.  This shifted-and-jammed
+| value is stored at the location pointed to by 'zPtr'.  Each of 'aPtr' and
+| 'zPtr' points to an array of four 64-bit elements that concatenate in the
+| platform's normal endian order to form a 256-bit integer.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist'
+| is greater than 256, the stored result will be either 0 or 1, depending on
+| whether the original 256 bits are all zeros.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shiftRightJam256M(
+     const u64int *aPtr, u32int dist, u64int *zPtr );
+#endif
+
+#ifndef softfloat_add128
+/*----------------------------------------------------------------------------
+| Returns the sum of the 128-bit integer formed by concatenating 'a64' and
+| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'.  The
+| addition is modulo 2^128, so any carry out is lost.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_add128( u64int a64, u64int a0, u64int b64, u64int b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 + b0;
+    z.v64 = a64 + b64 + (z.v0 < a0);
+    return z;
+}
+#else
+struct uint128
+ softfloat_add128( u64int a64, u64int a0, u64int b64, u64int b0 );
+#endif
+#endif
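+/*----------------------------------------------------------------------------
+| Illustrative example (not part of the original SoftFloat sources):
+|   softfloat_add128( 0, 0xFFFFFFFFFFFFFFFF, 0, 1 )
+| yields v64 = 1, v0 = 0, i.e. 2^64:  overflow of the low word is detected
+| by the comparison (z.v0 < a0) and propagated as a carry into the high
+| word.
+*----------------------------------------------------------------------------*/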
+
+#ifndef softfloat_add256M
+/*----------------------------------------------------------------------------
+| Adds the two 256-bit integers pointed to by 'aPtr' and 'bPtr'.  The addition
+| is modulo 2^256, so any carry out is lost.  The sum is stored at the
+| location pointed to by 'zPtr'.  Each of 'aPtr', 'bPtr', and 'zPtr' points to
+| an array of four 64-bit elements that concatenate in the platform's normal
+| endian order to form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_add256M(
+     const u64int *aPtr, const u64int *bPtr, u64int *zPtr );
+#endif
+
+#ifndef softfloat_sub128
+/*----------------------------------------------------------------------------
+| Returns the difference of the 128-bit integer formed by concatenating 'a64'
+| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'.
+| The subtraction is modulo 2^128, so any borrow out (carry out) is lost.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_sub128( u64int a64, u64int a0, u64int b64, u64int b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64;
+    z.v64 -= (a0 < b0);
+    return z;
+}
+#else
+struct uint128
+ softfloat_sub128( u64int a64, u64int a0, u64int b64, u64int b0 );
+#endif
+#endif
+
+#ifndef softfloat_sub256M
+/*----------------------------------------------------------------------------
+| Subtracts the 256-bit integer pointed to by 'bPtr' from the 256-bit integer
+| pointed to by 'aPtr'.  The subtraction is modulo 2^256, so any borrow out
+| (carry out) is lost.  The difference is stored at the location pointed to
+| by 'zPtr'.  Each of 'aPtr', 'bPtr', and 'zPtr' points to an array of four
+| 64-bit elements that concatenate in the platform's normal endian order to
+| form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_sub256M(
+     const u64int *aPtr, const u64int *bPtr, u64int *zPtr );
+#endif
+
+#ifndef softfloat_mul64ByShifted32To128
+/*----------------------------------------------------------------------------
+| Returns the 128-bit product of 'a', 'b', and 2^32.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE struct uint128 softfloat_mul64ByShifted32To128( u64int a, u32int b )
+{
+    u64int mid;
+    struct uint128 z;
+    mid = (u64int) (u32int) a * b;
+    z.v0 = mid<<32;
+    z.v64 = (u64int) (u32int) (a>>32) * b + (mid>>32);
+    return z;
+}
+#else
+struct uint128 softfloat_mul64ByShifted32To128( u64int a, u32int b );
+#endif
+#endif
+
+#ifndef softfloat_mul64To128
+/*----------------------------------------------------------------------------
+| Returns the 128-bit product of 'a' and 'b'.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_mul64To128( u64int a, u64int b );
+#endif
+
+#ifndef softfloat_mul128By32
+/*----------------------------------------------------------------------------
+| Returns the product of the 128-bit integer formed by concatenating 'a64' and
+| 'a0', multiplied by 'b'.  The multiplication is modulo 2^128; any overflow
+| bits are discarded.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL)
+INLINE
+struct uint128 softfloat_mul128By32( u64int a64, u64int a0, u32int b )
+{
+    struct uint128 z;
+    u64int mid;
+    u32int carry;
+    z.v0 = a0 * b;
+    mid = (u64int) (u32int) (a0>>32) * b;
+    carry = (u32int) ((u32int) (z.v0>>32) - (u32int) mid);
+    z.v64 = a64 * b + (u32int) ((mid + carry)>>32);
+    return z;
+}
+#else
+struct uint128 softfloat_mul128By32( u64int a64, u64int a0, u32int b );
+#endif
+#endif
+
+#ifndef softfloat_mul128To256M
+/*----------------------------------------------------------------------------
+| Multiplies the 128-bit unsigned integer formed by concatenating 'a64' and
+| 'a0' by the 128-bit unsigned integer formed by concatenating 'b64' and
+| 'b0'.  The 256-bit product is stored at the location pointed to by 'zPtr'.
+| Argument 'zPtr' points to an array of four 64-bit elements that concatenate
+| in the platform's normal endian order to form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_mul128To256M(
+     u64int a64, u64int a0, u64int b64, u64int b0, u64int *zPtr );
+#endif
+
+#else
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is not
+| defined.
+*----------------------------------------------------------------------------*/
+
+#ifndef softfloat_compare96M
+/*----------------------------------------------------------------------------
+| Compares the two 96-bit unsigned integers pointed to by 'aPtr' and 'bPtr'.
+| Returns -1 if the first integer (A) is less than the second (B); returns 0
+| if the two integers are equal; and returns +1 if the first integer (A)
+| is greater than the second (B).  (The result is thus the signum of A - B.)
+| Each of 'aPtr' and 'bPtr' points to an array of three 32-bit elements that
+| concatenate in the platform's normal endian order to form a 96-bit integer.
+*----------------------------------------------------------------------------*/
+s8int softfloat_compare96M( const u32int *aPtr, const u32int *bPtr );
+#endif
+
+#ifndef softfloat_compare128M
+/*----------------------------------------------------------------------------
+| Compares the two 128-bit unsigned integers pointed to by 'aPtr' and 'bPtr'.
+| Returns -1 if the first integer (A) is less than the second (B); returns 0
+| if the two integers are equal; and returns +1 if the first integer (A)
+| is greater than the second (B).  (The result is thus the signum of A - B.)
+| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that
+| concatenate in the platform's normal endian order to form a 128-bit integer.
+*----------------------------------------------------------------------------*/
+s8int
+ softfloat_compare128M( const u32int *aPtr, const u32int *bPtr );
+#endif
+
+#ifndef softfloat_shortShiftLeft64To96M
+/*----------------------------------------------------------------------------
+| Extends 'a' to 96 bits and shifts the value left by the number of bits given
+| in 'dist', which must be in the range 1 to 31.  The result is stored at the
+| location pointed to by 'zPtr'.  Argument 'zPtr' points to an array of three
+| 32-bit elements that concatenate in the platform's normal endian order to
+| form a 96-bit integer.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+void
+ softfloat_shortShiftLeft64To96M(
+     u64int a, u8int dist, u32int *zPtr )
+{
+    zPtr[indexWord( 3, 0 )] = (u32int) a<<dist;
+    a >>= 32 - dist;
+    zPtr[indexWord( 3, 2 )] = a>>32;
+    zPtr[indexWord( 3, 1 )] = a;
+}
+#else
+void
+ softfloat_shortShiftLeft64To96M(
+     u64int a, u8int dist, u32int *zPtr );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftLeftM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must be in the range 1 to 31.  Any nonzero bits shifted off are lost.  The
+| shifted N-bit result is stored at the location pointed to by 'zPtr'.  Each
+| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements
+| that concatenate in the platform's normal endian order to form an N-bit
+| integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shortShiftLeftM(
+     u8int size_words,
+     const u32int *aPtr,
+     u8int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shortShiftLeft96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftLeftM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftLeft96M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 3, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shortShiftLeft128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftLeftM' with
+| 'size_words' = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftLeft128M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 4, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shortShiftLeft160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftLeftM' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftLeft160M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 5, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftLeftM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must not be zero.  Any nonzero bits shifted off are lost.  The shifted
+| N-bit result is stored at the location pointed to by 'zPtr'.  Each of 'aPtr'
+| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that
+| concatenate in the platform's normal endian order to form an N-bit integer.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist' is
+| greater than N, the stored result will be 0.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shiftLeftM(
+     u8int size_words,
+     const u32int *aPtr,
+     u32int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shiftLeft96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftLeftM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftLeft96M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 3, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftLeft128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftLeftM' with
+| 'size_words' = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftLeft128M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 4, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftLeft160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftLeftM' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftLeft160M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 5, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shortShiftRightM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must be in the range 1 to 31.  Any nonzero bits shifted off are lost.  The
+| shifted N-bit result is stored at the location pointed to by 'zPtr'.  Each
+| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements
+| that concatenate in the platform's normal endian order to form an N-bit
+| integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shortShiftRightM(
+     u8int size_words,
+     const u32int *aPtr,
+     u8int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shortShiftRight128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftRightM' with
+| 'size_words' = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftRight128M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 4, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shortShiftRight160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftRightM' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftRight160M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 5, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shortShiftRightJamM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must be in the range 1 to 31.  If any nonzero bits are shifted off, they are
+| "jammed" into the least-significant bit of the shifted value by setting the
+| least-significant bit to 1.  This shifted-and-jammed N-bit result is stored
+| at the location pointed to by 'zPtr'.  Each of 'aPtr' and 'zPtr' points
+| to a 'size_words'-long array of 32-bit elements that concatenate in the
+| platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shortShiftRightJamM(
+     u8int size_words,
+     const u32int *aPtr,
+     u8int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shortShiftRightJam160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shortShiftRightJamM' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_shortShiftRightJam160M( aPtr, dist, zPtr ) softfloat_shortShiftRightJamM( 5, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftRightM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must not be zero.  Any nonzero bits shifted off are lost.  The shifted
+| N-bit result is stored at the location pointed to by 'zPtr'.  Each of 'aPtr'
+| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that
+| concatenate in the platform's normal endian order to form an N-bit integer.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist' is
+| greater than N, the stored result will be 0.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shiftRightM(
+     u8int size_words,
+     const u32int *aPtr,
+     u32int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shiftRight96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftRightM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftRight96M( aPtr, dist, zPtr ) softfloat_shiftRightM( 3, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftRightJamM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number
+| of bits given in 'dist', where N = 'size_words' * 32.  The value of 'dist'
+| must not be zero.  If any nonzero bits are shifted off, they are "jammed"
+| into the least-significant bit of the shifted value by setting the least-
+| significant bit to 1.  This shifted-and-jammed N-bit result is stored
+| at the location pointed to by 'zPtr'.  Each of 'aPtr' and 'zPtr' points
+| to a 'size_words'-long array of 32-bit elements that concatenate in the
+| platform's normal endian order to form an N-bit integer.
+|   The value of 'dist' can be arbitrarily large.  In particular, if 'dist'
+| is greater than N, the stored result will be either 0 or 1, depending on
+| whether the original N bits are all zeros.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_shiftRightJamM(
+     u8int size_words,
+     const u32int *aPtr,
+     u32int dist,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_shiftRightJam128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftRightJamM' with
+| 'size_words' = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftRightJam128M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 4, aPtr, dist, zPtr )
+#endif
+
+#ifndef softfloat_shiftRightJam160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_shiftRightJamM' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_shiftRightJam160M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 5, aPtr, dist, zPtr )
+#endif
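+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only; little-endian word order assumed).  The
+| "jam" keeps a sticky bit so that later rounding can still tell that the
+| discarded bits were nonzero:
+|
+|     u32int a[4] = { 0x00000280, 0, 0, 0 }, z[4];
+|     softfloat_shiftRightJam128M( a, 8, z );
+|     // a plain shift would give z[0] == 0x00000002; the nonzero bits
+|     // shifted off (0x80) set the least-significant bit, so z[0] == 3
+*----------------------------------------------------------------------------*/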
+
+#ifndef softfloat_addM
+/*----------------------------------------------------------------------------
+| Adds the two N-bit integers pointed to by 'aPtr' and 'bPtr', where N =
+| 'size_words' * 32.  The addition is modulo 2^N, so any carry out is lost.
+| The N-bit sum is stored at the location pointed to by 'zPtr'.  Each of
+| 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long array of 32-bit
+| elements that concatenate in the platform's normal endian order to form an
+| N-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_addM(
+     u8int size_words,
+     const u32int *aPtr,
+     const u32int *bPtr,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_add128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_add128M( aPtr, bPtr, zPtr ) softfloat_addM( 4, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_add160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_add160M( aPtr, bPtr, zPtr ) softfloat_addM( 5, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_addCarryM
+/*----------------------------------------------------------------------------
+| Adds the two N-bit unsigned integers pointed to by 'aPtr' and 'bPtr', where
+| N = 'size_words' * 32, plus 'carry', which must be either 0 or 1.  The N-bit
+| sum (modulo 2^N) is stored at the location pointed to by 'zPtr', and any
+| carry out is returned as the result.  Each of 'aPtr', 'bPtr', and 'zPtr'
+| points to a 'size_words'-long array of 32-bit elements that concatenate in
+| the platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+u8int
+ softfloat_addCarryM(
+     u8int size_words,
+     const u32int *aPtr,
+     const u32int *bPtr,
+     u8int carry,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_addComplCarryM
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addCarryM', except that
+| the value of the unsigned integer pointed to by 'bPtr' is bit-wise
+| complemented before the addition.
+*----------------------------------------------------------------------------*/
+u8int
+ softfloat_addComplCarryM(
+     u8int size_words,
+     const u32int *aPtr,
+     const u32int *bPtr,
+     u8int carry,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_addComplCarry96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addComplCarryM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_addComplCarry96M( aPtr, bPtr, carry, zPtr ) softfloat_addComplCarryM( 3, aPtr, bPtr, carry, zPtr )
+#endif
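+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only; little-endian word order assumed).  A
+| carry out of one word propagates through the whole N-bit sum, and
+| 'softfloat_addComplCarryM' with 'carry' = 1 computes a + ~b + 1, i.e. the
+| two's-complement subtraction a - b:
+|
+|     u32int a[3] = { 0xFFFFFFFF, 0, 0 }, b[3] = { 1, 0, 0 }, z[3];
+|     softfloat_addM( 3, a, b, z );               // z = { 0, 1, 0 }
+|     softfloat_addComplCarryM( 3, a, b, 1, z );  // z = a - b; returns 1
+*----------------------------------------------------------------------------*/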
+
+#ifndef softfloat_negXM
+/*----------------------------------------------------------------------------
+| Replaces the N-bit unsigned integer pointed to by 'zPtr' by the
+| 2s-complement of itself, where N = 'size_words' * 32.  Argument 'zPtr'
+| points to a 'size_words'-long array of 32-bit elements that concatenate in
+| the platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+void softfloat_negXM( u8int size_words, u32int *zPtr );
+#endif
+
+#ifndef softfloat_negX96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_negXM' with 'size_words'
+| = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_negX96M( zPtr ) softfloat_negXM( 3, zPtr )
+#endif
+
+#ifndef softfloat_negX128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_negXM' with 'size_words'
+| = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_negX128M( zPtr ) softfloat_negXM( 4, zPtr )
+#endif
+
+#ifndef softfloat_negX160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_negXM' with 'size_words'
+| = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_negX160M( zPtr ) softfloat_negXM( 5, zPtr )
+#endif
+
+#ifndef softfloat_negX256M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_negXM' with 'size_words'
+| = 8 (N = 256).
+*----------------------------------------------------------------------------*/
+#define softfloat_negX256M( zPtr ) softfloat_negXM( 8, zPtr )
+#endif
+
+#ifndef softfloat_sub1XM
+/*----------------------------------------------------------------------------
+| Subtracts 1 from the N-bit integer pointed to by 'zPtr', where N =
+| 'size_words' * 32.  The subtraction is modulo 2^N, so any borrow out (carry
+| out) is lost.  Argument 'zPtr' points to a 'size_words'-long array of 32-bit
+| elements that concatenate in the platform's normal endian order to form an
+| N-bit integer.
+*----------------------------------------------------------------------------*/
+void softfloat_sub1XM( u8int size_words, u32int *zPtr );
+#endif
+
+#ifndef softfloat_sub1X96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_sub1XM' with 'size_words'
+| = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_sub1X96M( zPtr ) softfloat_sub1XM( 3, zPtr )
+#endif
+
+#ifndef softfloat_sub1X160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_sub1XM' with 'size_words'
+| = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_sub1X160M( zPtr ) softfloat_sub1XM( 5, zPtr )
+#endif
+
+#ifndef softfloat_subM
+/*----------------------------------------------------------------------------
+| Subtracts the N-bit integer pointed to by 'bPtr' from the N-bit integer
+| pointed to by 'aPtr', where N = 'size_words' * 32.  The subtraction is
+| modulo 2^N, so any borrow out (carry out) is lost.  The N-bit difference is
+| stored at the location pointed to by 'zPtr'.  Each of 'aPtr', 'bPtr', and
+| 'zPtr' points to a 'size_words'-long array of 32-bit elements that
+| concatenate in the platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_subM(
+     u8int size_words,
+     const u32int *aPtr,
+     const u32int *bPtr,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_sub96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_subM' with 'size_words'
+| = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_sub96M( aPtr, bPtr, zPtr ) softfloat_subM( 3, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_sub128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_subM' with 'size_words'
+| = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_sub128M( aPtr, bPtr, zPtr ) softfloat_subM( 4, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_sub160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_subM' with 'size_words'
+| = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_sub160M( aPtr, bPtr, zPtr ) softfloat_subM( 5, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_mul64To128M
+/*----------------------------------------------------------------------------
+| Multiplies 'a' and 'b' and stores the 128-bit product at the location
+| pointed to by 'zPtr'.  Argument 'zPtr' points to an array of four 32-bit
+| elements that concatenate in the platform's normal endian order to form a
+| 128-bit integer.
+*----------------------------------------------------------------------------*/
+void softfloat_mul64To128M( u64int a, u64int b, u32int *zPtr );
+#endif
+
+#ifndef softfloat_mul128MTo256M
+/*----------------------------------------------------------------------------
+| Multiplies the two 128-bit unsigned integers pointed to by 'aPtr' and
+| 'bPtr', and stores the 256-bit product at the location pointed to by 'zPtr'.
+| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that
+| concatenate in the platform's normal endian order to form a 128-bit integer.
+| Argument 'zPtr' points to an array of eight 32-bit elements that concatenate
+| to form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_mul128MTo256M(
+     const u32int *aPtr, const u32int *bPtr, u32int *zPtr );
+#endif
+
+#ifndef softfloat_remStepMBy32
+/*----------------------------------------------------------------------------
+| Performs a "remainder reduction step" as follows:  Arguments 'remPtr' and
+| 'bPtr' both point to N-bit unsigned integers, where N = 'size_words' * 32.
+| Defining R and B as the values of those integers and defining 'q' as the
+| trial quotient digit supplied by the caller, the expression
+| (R<<'dist') - B * 'q' is computed modulo 2^N, and the N-bit result is
+| stored at the location pointed to by 'zPtr'.  Each of 'remPtr', 'bPtr',
+| and 'zPtr' points
+| to a 'size_words'-long array of 32-bit elements that concatenate in the
+| platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_remStepMBy32(
+     u8int size_words,
+     const u32int *remPtr,
+     u8int dist,
+     const u32int *bPtr,
+     u32int q,
+     u32int *zPtr
+ );
+#endif
+
+#ifndef softfloat_remStep96MBy32
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_remStepMBy32' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_remStep96MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 3, remPtr, dist, bPtr, q, zPtr )
+#endif
+
+#ifndef softfloat_remStep128MBy32
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_remStepMBy32' with
+| 'size_words' = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_remStep128MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 4, remPtr, dist, bPtr, q, zPtr )
+#endif
+
+#ifndef softfloat_remStep160MBy32
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_remStepMBy32' with
+| 'size_words' = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_remStep160MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 5, remPtr, dist, bPtr, q, zPtr )
+#endif
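+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only): in a long-division loop, 'q' is a trial
+| quotient digit estimated from the leading words of the remainder and the
+| divisor, and each step replaces the partial remainder R with
+| (R<<'dist') - B*'q'.  The names below are hypothetical:
+|
+|     q = (u32int) (rem64 / bHi);                    // digit estimate
+|     softfloat_remStep96MBy32( rem, 29, b, q, rem );
+*----------------------------------------------------------------------------*/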
+
+#endif
+
+/*----------------------------------------------------------------------------
+| Types used to pass 16-bit (half-precision and bfloat16), 32-bit, 64-bit,
+| and 128-bit floating-point arguments and results to/from functions.  These
+| types must be exactly 16 bits, 32 bits, 64 bits, and 128 bits in size,
+| respectively.  Where a
+| platform has "native" support for IEEE-Standard floating-point formats,
+| the types below may, if desired, be defined as aliases for the native types
+| (typically 'float' and 'double', and possibly 'long double').
+*----------------------------------------------------------------------------*/
+
+typedef struct { u16int v; } float16_t;
+typedef struct { u16int v; } bfloat16_t;
+typedef struct { u32int v; } float32_t;
+typedef struct { u64int v; } float64_t;
+typedef struct { u64int v[2]; } float128_t;
+
+/*----------------------------------------------------------------------------
+| The format of an 80-bit extended floating-point number in memory.  This
+| structure must contain a 16-bit field named 'signExp' and a 64-bit field
+| named 'signif'.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+struct extFloat80M { u64int signif; u16int signExp; };
+#else
+struct extFloat80M { u16int signExp; u64int signif; };
+#endif
+
+/*----------------------------------------------------------------------------
+| The type used to pass 80-bit extended floating-point arguments and
+| results to/from functions.  This type must have size identical to
+| 'struct extFloat80M'.  Type 'extFloat80_t' can be defined as an alias for
+| 'struct extFloat80M'.  Alternatively, if a platform has "native" support
+| for IEEE-Standard 80-bit extended floating-point, it may be possible,
+| if desired, to define 'extFloat80_t' as an alias for the native type
+| (presumably either 'long double' or a nonstandard compiler-intrinsic type).
+| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M'
+| must align exactly with the locations in memory of the sign, exponent, and
+| significand of the native type.
+*----------------------------------------------------------------------------*/
+typedef struct extFloat80M extFloat80_t;
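+
+/*----------------------------------------------------------------------------
+| Layout example (illustrative only): the 80-bit format keeps an explicit
+| integer bit at the top of 'signif' and biases the exponent by 0x3FFF, so
+| 1.0 (sign 0, unbiased exponent 0) is encoded as
+|
+|     extFloat80_t one;
+|     one.signExp = 0x3FFF;
+|     one.signif  = UINT64_C( 0x8000000000000000 );
+*----------------------------------------------------------------------------*/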
+
+union ui16_f16 { u16int ui; float16_t f; };
+union ui16_bf16 { u16int ui; bfloat16_t f; };
+union ui32_f32 { u32int ui; float32_t f; };
+union ui64_f64 { u64int ui; float64_t f; };
+
+#ifdef SOFTFLOAT_FAST_INT64
+union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; };
+union ui128_f128 { struct uint128 ui; float128_t f; };
+#endif
+
+enum {
+    softfloat_mulAdd_subC    = 1,
+    softfloat_mulAdd_subProd = 2
+};
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+u32int softfloat_roundToUI32( bool, u64int, u8int, bool );
+
+#ifdef SOFTFLOAT_FAST_INT64
+u64int
+ softfloat_roundToUI64(
+     bool, u64int, u64int, u8int, bool );
+#else
+u64int softfloat_roundMToUI64( bool, u32int *, u8int, bool );
+#endif
+
+s32int softfloat_roundToI32( bool, u64int, u8int, bool );
+
+#ifdef SOFTFLOAT_FAST_INT64
+s64int
+ softfloat_roundToI64(
+     bool, u64int, u64int, u8int, bool );
+#else
+s64int softfloat_roundMToI64( bool, u32int *, u8int, bool );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF16UI( a ) ((bool) ((u16int) (a)>>15))
+#define expF16UI( a ) ((s8int) ((a)>>10) & 0x1F)
+#define fracF16UI( a ) ((a) & 0x03FF)
+#define packToF16UI( sign, exp, sig ) (((u16int) (sign)<<15) + ((u16int) (exp)<<10) + (sig))
+
+#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF))
+
+struct exp8_sig16 { s8int exp; u16int sig; };
+struct exp8_sig16 softfloat_normSubnormalF16Sig( u16int );
+
+float16_t softfloat_roundPackToF16( bool, s16int, u16int );
+float16_t softfloat_normRoundPackToF16( bool, s16int, u16int );
+
+float16_t softfloat_addMagsF16( u16int, u16int );
+float16_t softfloat_subMagsF16( u16int, u16int );
+float16_t
+ softfloat_mulAddF16(
+     u16int, u16int, u16int, u8int );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signBF16UI( a ) ((bool) ((u16int) (a)>>15))
+#define expBF16UI( a ) ((s16int) ((a)>>7) & 0xFF)
+#define fracBF16UI( a ) ((a) & 0x07F)
+#define packToBF16UI( sign, exp, sig ) (((u16int) (sign)<<15) + ((u16int) (exp)<<7) + (sig))
+
+#define isNaNBF16UI( a ) (((~(a) & 0x7F80) == 0) && ((a) & 0x07F))
+
+bfloat16_t softfloat_roundPackToBF16( bool, s16int, u16int );
+struct exp8_sig16 softfloat_normSubnormalBF16Sig( u16int );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF32UI( a ) ((bool) ((u32int) (a)>>31))
+#define expF32UI( a ) ((s16int) ((a)>>23) & 0xFF)
+#define fracF32UI( a ) ((a) & 0x007FFFFF)
+#define packToF32UI( sign, exp, sig ) (((u32int) (sign)<<31) + ((u32int) (exp)<<23) + (sig))
+
+#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF))
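+
+/*----------------------------------------------------------------------------
+| Worked example (illustrative only): 1.0f has sign 0, biased exponent 127,
+| and a zero fraction, so packToF32UI( 0, 127, 0 ) == 0x3F800000, and
+| signF32UI, expF32UI, and fracF32UI recover 0, 127, and 0 from that bit
+| pattern.
+*----------------------------------------------------------------------------*/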
+
+struct exp16_sig32 { s16int exp; u32int sig; };
+struct exp16_sig32 softfloat_normSubnormalF32Sig( u32int );
+
+float32_t softfloat_roundPackToF32( bool, s16int, u32int );
+float32_t softfloat_normRoundPackToF32( bool, s16int, u32int );
+
+float32_t softfloat_addMagsF32( u32int, u32int );
+float32_t softfloat_subMagsF32( u32int, u32int );
+float32_t
+ softfloat_mulAddF32(
+     u32int, u32int, u32int, u8int );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF64UI( a ) ((bool) ((u64int) (a)>>63))
+#define expF64UI( a ) ((s16int) ((a)>>52) & 0x7FF)
+#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))
+#define packToF64UI( sign, exp, sig ) ((u64int) (((u64int) (sign)<<63) + ((u64int) (exp)<<52) + (sig)))
+
+#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )))
+
+struct exp16_sig64 { s16int exp; u64int sig; };
+struct exp16_sig64 softfloat_normSubnormalF64Sig( u64int );
+
+float64_t softfloat_roundPackToF64( bool, s16int, u64int );
+float64_t softfloat_normRoundPackToF64( bool, s16int, u64int );
+
+float64_t softfloat_addMagsF64( u64int, u64int, bool );
+float64_t softfloat_subMagsF64( u64int, u64int, bool );
+float64_t
+ softfloat_mulAddF64(
+     u64int, u64int, u64int, u8int );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signExtF80UI64( a64 ) ((bool) ((u16int) (a64)>>15))
+#define expExtF80UI64( a64 ) ((a64) & 0x7FFF)
+#define packToExtF80UI64( sign, exp ) ((u16int) (sign)<<15 | (exp))
+
+#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF )))
+
+#ifdef SOFTFLOAT_FAST_INT64
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+
+struct exp32_sig64 { s32int exp; u64int sig; };
+struct exp32_sig64 softfloat_normSubnormalExtF80Sig( u64int );
+
+extFloat80_t
+ softfloat_roundPackToExtF80(
+     bool, s32int, u64int, u64int, u8int );
+extFloat80_t
+ softfloat_normRoundPackToExtF80(
+     bool, s32int, u64int, u64int, u8int );
+
+extFloat80_t
+ softfloat_addMagsExtF80(
+     u16int, u64int, u16int, u64int, bool );
+extFloat80_t
+ softfloat_subMagsExtF80(
+     u16int, u64int, u16int, u64int, bool );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF128UI64( a64 ) ((bool) ((u64int) (a64)>>63))
+#define expF128UI64( a64 ) ((s32int) ((a64)>>48) & 0x7FFF)
+#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF ))
+#define packToF128UI64( sign, exp, sig64 ) (((u64int) (sign)<<63) + ((u64int) (exp)<<48) + (sig64))
+
+#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF ))))
+
+struct exp32_sig128 { s32int exp; struct uint128 sig; };
+struct exp32_sig128
+ softfloat_normSubnormalF128Sig( u64int, u64int );
+
+float128_t
+ softfloat_roundPackToF128(
+     bool, s32int, u64int, u64int, u64int );
+float128_t
+ softfloat_normRoundPackToF128(
+     bool, s32int, u64int, u64int );
+
+float128_t
+ softfloat_addMagsF128(
+     u64int, u64int, u64int, u64int, bool );
+float128_t
+ softfloat_subMagsF128(
+     u64int, u64int, u64int, u64int, bool );
+float128_t
+ softfloat_mulAddF128(
+     u64int,
+     u64int,
+     u64int,
+     u64int,
+     u64int,
+     u64int,
+     u8int
+ );
+
+#else
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+
+bool
+ softfloat_tryPropagateNaNExtF80M(
+     const struct extFloat80M *,
+     const struct extFloat80M *,
+     struct extFloat80M *
+ );
+void softfloat_invalidExtF80M( struct extFloat80M * );
+
+int softfloat_normExtF80SigM( u64int * );
+
+void
+ softfloat_roundPackMToExtF80M(
+     bool, s32int, u32int *, u8int, struct extFloat80M * );
+void
+ softfloat_normRoundPackMToExtF80M(
+     bool, s32int, u32int *, u8int, struct extFloat80M * );
+
+void
+ softfloat_addExtF80M(
+     const struct extFloat80M *,
+     const struct extFloat80M *,
+     struct extFloat80M *,
+     bool
+ );
+
+int
+ softfloat_compareNonnormExtF80M(
+     const struct extFloat80M *, const struct extFloat80M * );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF128UI96( a96 ) ((bool) ((u32int) (a96)>>31))
+#define expF128UI96( a96 ) ((s32int) ((a96)>>16) & 0x7FFF)
+#define fracF128UI96( a96 ) ((a96) & 0x0000FFFF)
+#define packToF128UI96( sign, exp, sig96 ) (((u32int) (sign)<<31) + ((u32int) (exp)<<16) + (sig96))
+
+bool softfloat_isNaNF128M( const u32int * );
+
+bool
+ softfloat_tryPropagateNaNF128M(
+     const u32int *, const u32int *, u32int * );
+void softfloat_invalidF128M( u32int * );
+
+int softfloat_shiftNormSigF128M( const u32int *, u8int, u32int * );
+
+void softfloat_roundPackMToF128M( bool, s32int, u32int *, u32int * );
+void softfloat_normRoundPackMToF128M( bool, s32int, u32int *, u32int * );
+
+void
+ softfloat_addF128M( const u32int *, const u32int *, u32int *, bool );
+void
+ softfloat_mulAddF128M(
+     const u32int *,
+     const u32int *,
+     const u32int *,
+     u32int *,
+     u8int
+ );
+
+#endif
+
+#ifndef THREAD_LOCAL
+#define THREAD_LOCAL
+#endif
+
+/*----------------------------------------------------------------------------
+| Software floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL u8int softfloat_detectTininess;
+enum {
+    softfloat_tininess_beforeRounding = 0,
+    softfloat_tininess_afterRounding  = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point rounding mode.  (Mode "odd" is supported only if
+| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.)
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL u8int softfloat_roundingMode;
+enum {
+    softfloat_round_near_even   = 0,
+    softfloat_round_minMag      = 1,
+    softfloat_round_min         = 2,
+    softfloat_round_max         = 3,
+    softfloat_round_near_maxMag = 4,
+    softfloat_round_odd         = 6
+};
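+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only; 'x', 'y', and 'z' are hypothetical
+| float32_t values).  The mode is a plain global, so callers set it before
+| the operations it should affect:
+|
+|     softfloat_roundingMode = softfloat_round_min;  // round toward -inf
+|     z = f32_add( x, y );
+*----------------------------------------------------------------------------*/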
+
+/*----------------------------------------------------------------------------
+| Software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL u8int softfloat_exceptionFlags;
+typedef enum {
+    softfloat_flag_inexact   =  1,
+    softfloat_flag_underflow =  2,
+    softfloat_flag_overflow  =  4,
+    softfloat_flag_infinite  =  8,
+    softfloat_flag_invalid   = 16
+} exceptionFlag_t;
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( u8int );
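+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only; 'x', 'y', and 'z' are hypothetical
+| float32_t values).  The flags are sticky, so clear them first and test
+| after the operation:
+|
+|     softfloat_exceptionFlags = 0;
+|     z = f32_div( x, y );
+|     if( softfloat_exceptionFlags & softfloat_flag_invalid )
+|         handle the error;          // e.g. 0/0: 'z' is the default NaN
+*----------------------------------------------------------------------------*/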
+
+/*----------------------------------------------------------------------------
+| Integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float16_t ui32_to_f16( u32int );
+float32_t ui32_to_f32( u32int );
+float64_t ui32_to_f64( u32int );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t ui32_to_extF80( u32int );
+float128_t ui32_to_f128( u32int );
+#endif
+void ui32_to_extF80M( u32int, extFloat80_t * );
+void ui32_to_f128M( u32int, float128_t * );
+float16_t ui64_to_f16( u64int );
+float32_t ui64_to_f32( u64int );
+float64_t ui64_to_f64( u64int );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t ui64_to_extF80( u64int );
+float128_t ui64_to_f128( u64int );
+#endif
+void ui64_to_extF80M( u64int, extFloat80_t * );
+void ui64_to_f128M( u64int, float128_t * );
+float16_t i32_to_f16( s32int );
+float32_t i32_to_f32( s32int );
+float64_t i32_to_f64( s32int );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t i32_to_extF80( s32int );
+float128_t i32_to_f128( s32int );
+#endif
+void i32_to_extF80M( s32int, extFloat80_t * );
+void i32_to_f128M( s32int, float128_t * );
+float16_t i64_to_f16( s64int );
+float32_t i64_to_f32( s64int );
+float64_t i64_to_f64( s64int );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t i64_to_extF80( s64int );
+float128_t i64_to_f128( s64int );
+#endif
+void i64_to_extF80M( s64int, extFloat80_t * );
+void i64_to_f128M( s64int, float128_t * );
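+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only): routines without an 'M' suffix return
+| their result by value and, for the extFloat80_t and float128_t formats,
+| exist only when SOFTFLOAT_FAST_INT64 is defined; the 'M' variants store
+| the result through a pointer:
+|
+|     extFloat80_t x;
+|     i32_to_extF80M( -7, &x );      // x now holds -7.0 exactly
+*----------------------------------------------------------------------------*/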
+
+/*----------------------------------------------------------------------------
+| 16-bit (half-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+u32int f16_to_ui32( float16_t, u8int, bool );
+u64int f16_to_ui64( float16_t, u8int, bool );
+s32int f16_to_i32( float16_t, u8int, bool );
+s64int f16_to_i64( float16_t, u8int, bool );
+u32int f16_to_ui32_r_minMag( float16_t, bool );
+u64int f16_to_ui64_r_minMag( float16_t, bool );
+s32int f16_to_i32_r_minMag( float16_t, bool );
+s64int f16_to_i64_r_minMag( float16_t, bool );
+float32_t f16_to_f32( float16_t );
+float64_t f16_to_f64( float16_t );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t f16_to_extF80( float16_t );
+float128_t f16_to_f128( float16_t );
+#endif
+void f16_to_extF80M( float16_t, extFloat80_t * );
+void f16_to_f128M( float16_t, float128_t * );
+float16_t f16_roundToInt( float16_t, u8int, bool );
+float16_t f16_add( float16_t, float16_t );
+float16_t f16_sub( float16_t, float16_t );
+float16_t f16_mul( float16_t, float16_t );
+float16_t f16_mulAdd( float16_t, float16_t, float16_t );
+float16_t f16_div( float16_t, float16_t );
+float16_t f16_rem( float16_t, float16_t );
+float16_t f16_sqrt( float16_t );
+bool f16_eq( float16_t, float16_t );
+bool f16_le( float16_t, float16_t );
+bool f16_lt( float16_t, float16_t );
+bool f16_eq_signaling( float16_t, float16_t );
+bool f16_le_quiet( float16_t, float16_t );
+bool f16_lt_quiet( float16_t, float16_t );
+bool f16_isSignalingNaN( float16_t );
+
+/*----------------------------------------------------------------------------
+| 16-bit (brain float 16) floating-point operations.
+*----------------------------------------------------------------------------*/
+float32_t bf16_to_f32( bfloat16_t );
+bfloat16_t f32_to_bf16( float32_t );
+bool bf16_isSignalingNaN( bfloat16_t );
+
+/*----------------------------------------------------------------------------
+| 32-bit (single-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+u32int f32_to_ui32( float32_t, u8int, bool );
+u64int f32_to_ui64( float32_t, u8int, bool );
+s32int f32_to_i32( float32_t, u8int, bool );
+s64int f32_to_i64( float32_t, u8int, bool );
+u32int f32_to_ui32_r_minMag( float32_t, bool );
+u64int f32_to_ui64_r_minMag( float32_t, bool );
+s32int f32_to_i32_r_minMag( float32_t, bool );
+s64int f32_to_i64_r_minMag( float32_t, bool );
+float16_t f32_to_f16( float32_t );
+float64_t f32_to_f64( float32_t );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t f32_to_extF80( float32_t );
+float128_t f32_to_f128( float32_t );
+#endif
+void f32_to_extF80M( float32_t, extFloat80_t * );
+void f32_to_f128M( float32_t, float128_t * );
+float32_t f32_roundToInt( float32_t, u8int, bool );
+float32_t f32_add( float32_t, float32_t );
+float32_t f32_sub( float32_t, float32_t );
+float32_t f32_mul( float32_t, float32_t );
+float32_t f32_mulAdd( float32_t, float32_t, float32_t );
+float32_t f32_div( float32_t, float32_t );
+float32_t f32_rem( float32_t, float32_t );
+float32_t f32_sqrt( float32_t );
+bool f32_eq( float32_t, float32_t );
+bool f32_le( float32_t, float32_t );
+bool f32_lt( float32_t, float32_t );
+bool f32_eq_signaling( float32_t, float32_t );
+bool f32_le_quiet( float32_t, float32_t );
+bool f32_lt_quiet( float32_t, float32_t );
+bool f32_isSignalingNaN( float32_t );
+
+/*----------------------------------------------------------------------------
+| 64-bit (double-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+u32int f64_to_ui32( float64_t, u8int, bool );
+u64int f64_to_ui64( float64_t, u8int, bool );
+s32int f64_to_i32( float64_t, u8int, bool );
+s64int f64_to_i64( float64_t, u8int, bool );
+u32int f64_to_ui32_r_minMag( float64_t, bool );
+u64int f64_to_ui64_r_minMag( float64_t, bool );
+s32int f64_to_i32_r_minMag( float64_t, bool );
+s64int f64_to_i64_r_minMag( float64_t, bool );
+float16_t f64_to_f16( float64_t );
+float32_t f64_to_f32( float64_t );
+#ifdef SOFTFLOAT_FAST_INT64
+extFloat80_t f64_to_extF80( float64_t );
+float128_t f64_to_f128( float64_t );
+#endif
+void f64_to_extF80M( float64_t, extFloat80_t * );
+void f64_to_f128M( float64_t, float128_t * );
+float64_t f64_roundToInt( float64_t, u8int, bool );
+float64_t f64_add( float64_t, float64_t );
+float64_t f64_sub( float64_t, float64_t );
+float64_t f64_mul( float64_t, float64_t );
+float64_t f64_mulAdd( float64_t, float64_t, float64_t );
+float64_t f64_div( float64_t, float64_t );
+float64_t f64_rem( float64_t, float64_t );
+float64_t f64_sqrt( float64_t );
+bool f64_eq( float64_t, float64_t );
+bool f64_le( float64_t, float64_t );
+bool f64_lt( float64_t, float64_t );
+bool f64_eq_signaling( float64_t, float64_t );
+bool f64_le_quiet( float64_t, float64_t );
+bool f64_lt_quiet( float64_t, float64_t );
+bool f64_isSignalingNaN( float64_t );
+
+/*----------------------------------------------------------------------------
+| Rounding precision for 80-bit extended double-precision floating-point.
+| Valid values are 32, 64, and 80.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL u8int extF80_roundingPrecision;
+
+/*----------------------------------------------------------------------------
+| 80-bit extended double-precision floating-point operations.
+*----------------------------------------------------------------------------*/
+#ifdef SOFTFLOAT_FAST_INT64
+u32int extF80_to_ui32( extFloat80_t, u8int, bool );
+u64int extF80_to_ui64( extFloat80_t, u8int, bool );
+s32int extF80_to_i32( extFloat80_t, u8int, bool );
+s64int extF80_to_i64( extFloat80_t, u8int, bool );
+u32int extF80_to_ui32_r_minMag( extFloat80_t, bool );
+u64int extF80_to_ui64_r_minMag( extFloat80_t, bool );
+s32int extF80_to_i32_r_minMag( extFloat80_t, bool );
+s64int extF80_to_i64_r_minMag( extFloat80_t, bool );
+float16_t extF80_to_f16( extFloat80_t );
+float32_t extF80_to_f32( extFloat80_t );
+float64_t extF80_to_f64( extFloat80_t );
+float128_t extF80_to_f128( extFloat80_t );
+extFloat80_t extF80_roundToInt( extFloat80_t, u8int, bool );
+extFloat80_t extF80_add( extFloat80_t, extFloat80_t );
+extFloat80_t extF80_sub( extFloat80_t, extFloat80_t );
+extFloat80_t extF80_mul( extFloat80_t, extFloat80_t );
+extFloat80_t extF80_div( extFloat80_t, extFloat80_t );
+extFloat80_t extF80_rem( extFloat80_t, extFloat80_t );
+extFloat80_t extF80_sqrt( extFloat80_t );
+bool extF80_eq( extFloat80_t, extFloat80_t );
+bool extF80_le( extFloat80_t, extFloat80_t );
+bool extF80_lt( extFloat80_t, extFloat80_t );
+bool extF80_eq_signaling( extFloat80_t, extFloat80_t );
+bool extF80_le_quiet( extFloat80_t, extFloat80_t );
+bool extF80_lt_quiet( extFloat80_t, extFloat80_t );
+bool extF80_isSignalingNaN( extFloat80_t );
+#endif
+u32int extF80M_to_ui32( const extFloat80_t *, u8int, bool );
+u64int extF80M_to_ui64( const extFloat80_t *, u8int, bool );
+s32int extF80M_to_i32( const extFloat80_t *, u8int, bool );
+s64int extF80M_to_i64( const extFloat80_t *, u8int, bool );
+u32int extF80M_to_ui32_r_minMag( const extFloat80_t *, bool );
+u64int extF80M_to_ui64_r_minMag( const extFloat80_t *, bool );
+s32int extF80M_to_i32_r_minMag( const extFloat80_t *, bool );
+s64int extF80M_to_i64_r_minMag( const extFloat80_t *, bool );
+float16_t extF80M_to_f16( const extFloat80_t * );
+float32_t extF80M_to_f32( const extFloat80_t * );
+float64_t extF80M_to_f64( const extFloat80_t * );
+void extF80M_to_f128M( const extFloat80_t *, float128_t * );
+void
+ extF80M_roundToInt(
+     const extFloat80_t *, u8int, bool, extFloat80_t * );
+void extF80M_add( const extFloat80_t *, const extFloat80_t *, extFloat80_t * );
+void extF80M_sub( const extFloat80_t *, const extFloat80_t *, extFloat80_t * );
+void extF80M_mul( const extFloat80_t *, const extFloat80_t *, extFloat80_t * );
+void extF80M_div( const extFloat80_t *, const extFloat80_t *, extFloat80_t * );
+void extF80M_rem( const extFloat80_t *, const extFloat80_t *, extFloat80_t * );
+void extF80M_sqrt( const extFloat80_t *, extFloat80_t * );
+bool extF80M_eq( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_le( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_lt( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_eq_signaling( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_le_quiet( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_lt_quiet( const extFloat80_t *, const extFloat80_t * );
+bool extF80M_isSignalingNaN( const extFloat80_t * );
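+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only): a 32-bit product carried out at 80-bit
+| precision and rounded back.  Raw bit patterns go through the 'ui32_f32'
+| union, since 'float32_t' is a struct:
+|
+|     union ui32_f32 u;
+|     extFloat80_t x, y, z;
+|
+|     u.ui = 0x40000000;             // 2.0f
+|     f32_to_extF80M( u.f, &x );
+|     f32_to_extF80M( u.f, &y );
+|     extF80M_mul( &x, &y, &z );
+|     u.f = extF80M_to_f32( &z );    // u.ui == 0x40800000, i.e. 4.0f
+*----------------------------------------------------------------------------*/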
+
+/*----------------------------------------------------------------------------
+| 128-bit (quadruple-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+#ifdef SOFTFLOAT_FAST_INT64
+u32int f128_to_ui32( float128_t, u8int, bool );
+u64int f128_to_ui64( float128_t, u8int, bool );
+s32int f128_to_i32( float128_t, u8int, bool );
+s64int f128_to_i64( float128_t, u8int, bool );
+u32int f128_to_ui32_r_minMag( float128_t, bool );
+u64int f128_to_ui64_r_minMag( float128_t, bool );
+s32int f128_to_i32_r_minMag( float128_t, bool );
+s64int f128_to_i64_r_minMag( float128_t, bool );
+float16_t f128_to_f16( float128_t );
+float32_t f128_to_f32( float128_t );
+float64_t f128_to_f64( float128_t );
+extFloat80_t f128_to_extF80( float128_t );
+float128_t f128_roundToInt( float128_t, u8int, bool );
+float128_t f128_add( float128_t, float128_t );
+float128_t f128_sub( float128_t, float128_t );
+float128_t f128_mul( float128_t, float128_t );
+float128_t f128_mulAdd( float128_t, float128_t, float128_t );
+float128_t f128_div( float128_t, float128_t );
+float128_t f128_rem( float128_t, float128_t );
+float128_t f128_sqrt( float128_t );
+bool f128_eq( float128_t, float128_t );
+bool f128_le( float128_t, float128_t );
+bool f128_lt( float128_t, float128_t );
+bool f128_eq_signaling( float128_t, float128_t );
+bool f128_le_quiet( float128_t, float128_t );
+bool f128_lt_quiet( float128_t, float128_t );
+bool f128_isSignalingNaN( float128_t );
+#endif
+u32int f128M_to_ui32( const float128_t *, u8int, bool );
+u64int f128M_to_ui64( const float128_t *, u8int, bool );
+s32int f128M_to_i32( const float128_t *, u8int, bool );
+s64int f128M_to_i64( const float128_t *, u8int, bool );
+u32int f128M_to_ui32_r_minMag( const float128_t *, bool );
+u64int f128M_to_ui64_r_minMag( const float128_t *, bool );
+s32int f128M_to_i32_r_minMag( const float128_t *, bool );
+s64int f128M_to_i64_r_minMag( const float128_t *, bool );
+float16_t f128M_to_f16( const float128_t * );
+float32_t f128M_to_f32( const float128_t * );
+float64_t f128M_to_f64( const float128_t * );
+void f128M_to_extF80M( const float128_t *, extFloat80_t * );
+void f128M_roundToInt( const float128_t *, u8int, bool, float128_t * );
+void f128M_add( const float128_t *, const float128_t *, float128_t * );
+void f128M_sub( const float128_t *, const float128_t *, float128_t * );
+void f128M_mul( const float128_t *, const float128_t *, float128_t * );
+void
+ f128M_mulAdd(
+     const float128_t *, const float128_t *, const float128_t *, float128_t *
+ );
+void f128M_div( const float128_t *, const float128_t *, float128_t * );
+void f128M_rem( const float128_t *, const float128_t *, float128_t * );
+void f128M_sqrt( const float128_t *, float128_t * );
+bool f128M_eq( const float128_t *, const float128_t * );
+bool f128M_le( const float128_t *, const float128_t * );
+bool f128M_lt( const float128_t *, const float128_t * );
+bool f128M_eq_signaling( const float128_t *, const float128_t * );
+bool f128M_le_quiet( const float128_t *, const float128_t * );
+bool f128M_lt_quiet( const float128_t *, const float128_t * );
+bool f128M_isSignalingNaN( const float128_t * );
+
+/*----------------------------------------------------------------------------
+| Default value for 'softfloat_detectTininess'.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_afterRounding
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 32-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui32_fromPosOverflow 0xFFFFFFFF
+#define ui32_fromNegOverflow 0xFFFFFFFF
+#define ui32_fromNaN         0xFFFFFFFF
+#define i32_fromPosOverflow  (-0x7FFFFFFF - 1)
+#define i32_fromNegOverflow  (-0x7FFFFFFF - 1)
+#define i32_fromNaN          (-0x7FFFFFFF - 1)
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 64-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define ui64_fromNegOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define ui64_fromNaN         UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define i64_fromPosOverflow  (-INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
+#define i64_fromNegOverflow  (-INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
+#define i64_fromNaN          (-INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
+
+/*----------------------------------------------------------------------------
+| "Common NaN" structure, used to transfer NaN representations from one format
+| to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+    bool sign;
+#ifdef LITTLEENDIAN
+    u64int v0, v64;
+#else
+    u64int v64, v0;
+#endif
+};
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 16-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF16UI 0xFE00
+
+/*----------------------------------------------------------------------------
+| Returns true when 16-bit unsigned integer 'uiA' has the bit pattern of a
+| 16-bit floating-point signaling NaN.
+| Note:  This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF))
+
+/*----------------------------------------------------------------------------
+| Assuming 'uiA' has the bit pattern of a 16-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by 'zPtr'.  If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f16UIToCommonNaN( u16int uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into a 16-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+u16int softfloat_commonNaNToF16UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting 'uiA' and 'uiB' as the bit patterns of two 16-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either 'uiA' or 'uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+u16int
+ softfloat_propagateNaNF16UI( u16int uiA, u16int uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 32-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns true when 32-bit unsigned integer 'uiA' has the bit pattern of a
+| 32-bit floating-point signaling NaN.
+| Note:  This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF))
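+
+/*----------------------------------------------------------------------------
+| Worked example (illustrative only): 0x7F800001 has an all-ones exponent, a
+| clear quiet bit, and a nonzero fraction, so softfloat_isSigNaNF32UI
+| accepts it; the default NaN 0xFFC00000 above has the quiet bit set and is
+| rejected.
+*----------------------------------------------------------------------------*/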
+
+/*----------------------------------------------------------------------------
+| Assuming 'uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by 'zPtr'.  If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f32UIToCommonNaN( u32int uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into a 32-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+u32int softfloat_commonNaNToF32UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting 'uiA' and 'uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either 'uiA' or 'uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+u32int
+ softfloat_propagateNaNF32UI( u32int uiA, u32int uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 64-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C( 0xFFF8000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when 64-bit unsigned integer 'uiA' has the bit pattern of a
+| 64-bit floating-point signaling NaN.
+| Note:  This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF )))
+
+/*----------------------------------------------------------------------------
+| Assuming 'uiA' has the bit pattern of a 64-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by 'zPtr'.  If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f64UIToCommonNaN( u64int uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into a 64-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+u64int softfloat_commonNaNToF64UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting 'uiA' and 'uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either 'uiA' or 'uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+u64int
+ softfloat_propagateNaNF64UI( u64int uiA, u64int uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 80-bit extended floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNExtF80UI64 0xFFFF
+#define defaultNaNExtF80UI0  UINT64_C( 0xC000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 80-bit unsigned integer formed from concatenating
+| 16-bit 'uiA64' and 64-bit 'uiA0' has the bit pattern of an 80-bit extended
+| floating-point signaling NaN.
+| Note:  This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF )))
+
+#ifdef SOFTFLOAT_FAST_INT64
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is
+| defined.
+*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+| Assuming the unsigned integer formed from concatenating 'uiA64' and 'uiA0'
+| has the bit pattern of an 80-bit extended floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by 'zPtr'.  If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_extF80UIToCommonNaN(
+     u16int uiA64, u64int uiA0, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into an 80-bit extended
+| floating-point NaN, and returns the bit pattern of this value as an unsigned
+| integer.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting the unsigned integer formed from concatenating 'uiA64' and
+| 'uiA0' as an 80-bit extended floating-point value, and likewise interpreting
+| the unsigned integer formed from concatenating 'uiB64' and 'uiB0' as another
+| 80-bit extended floating-point value, and assuming at least one of these
+| floating-point values is a NaN, returns the bit pattern of the combined NaN
+| result.  If either original floating-point value is a signaling NaN, the
+| invalid exception is raised.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_propagateNaNExtF80UI(
+     u16int uiA64,
+     u64int uiA0,
+     u16int uiB64,
+     u64int uiB0
+ );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 128-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF128UI64 UINT64_C( 0xFFFF800000000000 )
+#define defaultNaNF128UI0  UINT64_C( 0 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 128-bit unsigned integer formed from concatenating
+| 64-bit 'uiA64' and 64-bit 'uiA0' has the bit pattern of a 128-bit floating-
+| point signaling NaN.
+| Note:  This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF ))))
+
+/*----------------------------------------------------------------------------
+| Assuming the unsigned integer formed from concatenating 'uiA64' and 'uiA0'
+| has the bit pattern of a 128-bit floating-point NaN, converts this NaN to
+| the common NaN form, and stores the resulting common NaN at the location
+| pointed to by 'zPtr'.  If the NaN is a signaling NaN, the invalid exception
+| is raised.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_f128UIToCommonNaN(
+     u64int uiA64, u64int uiA0, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into a 128-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting the unsigned integer formed from concatenating 'uiA64' and
+| 'uiA0' as a 128-bit floating-point value, and likewise interpreting the
+| unsigned integer formed from concatenating 'uiB64' and 'uiB0' as another
+| 128-bit floating-point value, and assuming at least one of these floating-
+| point values is a NaN, returns the bit pattern of the combined NaN result.
+| If either original floating-point value is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_propagateNaNF128UI(
+     u64int uiA64,
+     u64int uiA0,
+     u64int uiB64,
+     u64int uiB0
+ );
+
+#else
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is not
+| defined.
+*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+| Assuming the 80-bit extended floating-point value pointed to by 'aSPtr' is
+| a NaN, converts this NaN to the common NaN form, and stores the resulting
+| common NaN at the location pointed to by 'zPtr'.  If the NaN is a signaling
+| NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_extF80MToCommonNaN(
+     const struct extFloat80M *aSPtr, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into an 80-bit extended
+| floating-point NaN, and stores this NaN at the location pointed to by
+| 'zSPtr'.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_commonNaNToExtF80M(
+     const struct commonNaN *aPtr, struct extFloat80M *zSPtr );
+
+/*----------------------------------------------------------------------------
+| Assuming at least one of the two 80-bit extended floating-point values
+| pointed to by 'aSPtr' and 'bSPtr' is a NaN, stores the combined NaN result
+| at the location pointed to by 'zSPtr'.  If either original floating-point
+| value is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_propagateNaNExtF80M(
+     const struct extFloat80M *aSPtr,
+     const struct extFloat80M *bSPtr,
+     struct extFloat80M *zSPtr
+ );
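+
+/*----------------------------------------------------------------------------
+| Usage sketch (illustrative only, not part of the SoftFloat API): this is
+| the helper that the extF80M arithmetic used by dotadd.c falls back to when
+| an operand is a NaN:
+|
+|     extFloat80_t a, b, z;
+|     // ... fill a and b; at least one holds a NaN ...
+|     softfloat_propagateNaNExtF80M(&a, &b, &z);  // z = the quiet combined NaN
+*----------------------------------------------------------------------------*/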
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 128-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF128UI96 0xFFFF8000
+#define defaultNaNF128UI64 0
+#define defaultNaNF128UI32 0
+#define defaultNaNF128UI0  0
+
+/*----------------------------------------------------------------------------
+| Assuming the 128-bit floating-point value pointed to by 'aWPtr' is a NaN,
+| converts this NaN to the common NaN form, and stores the resulting common
+| NaN at the location pointed to by 'zPtr'.  If the NaN is a signaling NaN,
+| the invalid exception is raised.  Argument 'aWPtr' points to an array of
+| four 32-bit elements that concatenate in the platform's normal endian order
+| to form a 128-bit floating-point value.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_f128MToCommonNaN( const u32int *aWPtr, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by 'aPtr' into a 128-bit floating-point
+| NaN, and stores this NaN at the location pointed to by 'zWPtr'.  Argument
+| 'zWPtr' points to an array of four 32-bit elements that concatenate in the
+| platform's normal endian order to form a 128-bit floating-point value.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_commonNaNToF128M( const struct commonNaN *aPtr, u32int *zWPtr );
+
+/*----------------------------------------------------------------------------
+| Assuming at least one of the two 128-bit floating-point values pointed to by
+| 'aWPtr' and 'bWPtr' is a NaN, stores the combined NaN result at the location
+| pointed to by 'zWPtr'.  If either original floating-point value is a
+| signaling NaN, the invalid exception is raised.  Each of 'aWPtr', 'bWPtr',
+| and 'zWPtr' points to an array of four 32-bit elements that concatenate in
+| the platform's normal endian order to form a 128-bit floating-point value.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_propagateNaNF128M(
+     const u32int *aWPtr, const u32int *bWPtr, u32int *zWPtr );
+
+#endif
--- a/unix/u.h
+++ b/unix/u.h
@@ -17,11 +17,14 @@
 typedef unsigned char uchar;
 typedef long long vlong;
 typedef unsigned long long uvlong;
+typedef int8_t s8int;
 typedef uint8_t u8int;
 typedef int16_t s16int;
 typedef uint16_t u16int;
 typedef int32_t s32int;
 typedef uint32_t u32int;
+typedef int64_t s64int;
+typedef uint64_t u64int;
 typedef intptr_t intptr;
 typedef uintptr_t uintptr;
 
--- a/wad.c
+++ b/wad.c
@@ -1,124 +1,242 @@
 #include "quakedef.h"
 
-static int wad_numlumps;
-static lumpinfo_t *wad_lumps;
-static byte *wad_base;
+enum {
+	WAD_VER2 = 'W'<<0|'A'<<8|'D'<<16|'2'<<24,
+	WAD_VER3 = 'W'<<0|'A'<<8|'D'<<16|'3'<<24,
 
-void SwapPic (qpic_t *pic);
+	TYP_QPIC = 0x42,
+	TYP_MIPTEX = 0x43, /* 0x43 in Half-Life WAD3; Quake's WAD2 used 0x44 */
+};
 
-/*
-==================
-W_CleanupName
+typedef struct Lmp Lmp;
 
-Lowercases name and pads with spaces and a terminating 0 to the length of
-lumpinfo_t->name.
-Used so lumpname lookups can proceed rapidly by comparing 4 chars at a time
-Space padding is so names can be printed nicely in tables.
-Can safely be performed in place.
-==================
-*/
-void W_CleanupName (char *in, char *out)
-{
-	int		i;
-	int		c;
+struct Lmp {
+	char name[16];
+	int off;
+	int sz;
+	int type;
+};
 
-	for (i=0 ; i<16 ; i++ )
-	{
-		c = in[i];
-		if (!c)
-			break;
+struct Wad {
+	byte *in;
+	int sz;
+	int ver;
+	int numlumps;
+	Lmp lumps[];
+};
 
-		if (c >= 'A' && c <= 'Z')
-			c += ('a' - 'A');
-		out[i] = c;
+Wad *
+W_OpenWad(char *path)
+{
+	Wad *w;
+	Lmp *lmp;
+	byte *in, *p;
+	int sz, ver, off, n, i;
+
+	if((in = loadhunklmp(path, &sz)) == nil)
+		goto err;
+	if(sz < 4+4+4){
+		werrstr("invalid size: %d", sz);
+		goto err;
 	}
+	p = in;
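+	/* header: 4-byte magic ("WAD2"/"WAD3"), lump count, directory offset */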
+	ver = le32(p);
+	if(ver != WAD_VER2 && ver != WAD_VER3){
+		werrstr("unsupported version: %c%c%c%c", (char)in[0], (char)in[1], (char)in[2], (char)in[3]);
+		goto err;
+	}
+	n = le32(p);
+	off = le32(p);
+	if(off < 0 || n < 0 || off+n*32 > sz){
+		werrstr("invalid wad: off=%d numlumps=%d sz=%d", off, n, sz);
+		goto err;
+	}
+	w = Hunk_Alloc(sizeof(*w) + sizeof(*w->lumps)*n);
+	w->in = in;
+	w->sz = sz;
+	w->ver = ver;
+	w->numlumps = n;
+	p = in + off;
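+	/* each on-disk directory entry is 32 bytes:
+	 * offset(4) disksize(4) size(4) type(1) compression(1) pad(2) name(16) */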
+	for(lmp = w->lumps; n-- > 0; lmp++){
+		lmp->off = le32(p);
+		p += 4; /* disksize */
+		lmp->sz = le32(p);
+		lmp->type = *p++;
+		p += 1+2; /* compression + padding */
+		memmove(lmp->name, p, sizeof(lmp->name));
+		for(i = 0; i < nelem(lmp->name) && lmp->name[i]; i++)
+			lmp->name[i] = tolower(lmp->name[i]);
+		memset(lmp->name+i, 0, nelem(lmp->name)-i);
+		p += nelem(lmp->name);
+	}
 
-	for ( ; i< 16 ; i++ )
-		out[i] = 0;
+	return w;
+err:
+	werrstr("W_OpenWad: %s: %s", path, lerr());
+	return nil;
 }
 
+static Lmp *
+W_FindName(Wad *w, char *name)
+{
+	int i;
+	Lmp *lmp;
+	char t[16];
 
+	for(i = 0; i < nelem(t) && name[i]; i++)
+		t[i] = tolower(name[i]);
+	memset(t+i, 0, sizeof(t)-i);
+	for(i = 0, lmp = w->lumps; i < w->numlumps; i++, lmp++){
+		if(strncmp(lmp->name, t, nelem(lmp->name)) == 0)
+			return lmp;
+	}
+	werrstr("%s: not found", name);
+	return nil;
+}
 
-/*
-====================
-W_LoadWadFile
-====================
-*/
-void W_LoadWadFile (char *filename)
+qpic_t *
+W_ReadQpic(Wad *wad, char *name, mem_user_t *c)
 {
-	lumpinfo_t		*lump_p;
-	wadinfo_t		*header;
-	int				i;
-	int				infotableofs;
+	int i, n, w, h, palsz, j;
+	Lmp *lmp;
+	byte *p, *pal;
+	qpic_t *q;
+	mem_user_t dummy = {0};
 
-	wad_base = loadhunklmp(filename, nil);
-	if(wad_base == nil)
-		fatal("W_LoadWadFile: %s", lerr());
+	if(c == nil){
+		memset(&dummy, 0, sizeof(dummy));
+		c = &dummy;
+	}
+	if((q = Cache_Check(c)) != nil)
+		return q;
+	if((lmp = W_FindName(wad, name)) == nil || lmp->type != TYP_QPIC)
+		return nil;
+	p = wad->in + lmp->off;
+	w = le32(p);
+	h = le32(p);
+	n = w*h;
+	if(w < 0 || h < 0){
+		werrstr("invalid size: %dx%d", w, h);
+		goto err;
+	}
 
-	header = (wadinfo_t *)wad_base;
+	pal = nil;
+	palsz = 0;
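+	/* WAD2 qpics index the global Quake palette; WAD3 (Half-Life) qpics
+	 * carry their own palette after the pixels: u16 count, then RGB triples */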
+	if(wad->ver == WAD_VER2){
+		if(lmp->sz < 4+4+n){
+			werrstr("truncated: %d < %d", lmp->sz, 4+4+n);
+			goto err;
+		}
+	}else if(wad->ver == WAD_VER3){
+		pal = p + n;
+		palsz = le16(pal);
+		if(palsz < 0 || palsz > 256 || lmp->sz < 4+4+n+2+palsz*3){
+			werrstr("invalid: palsz=%d, %d < %d", palsz, lmp->sz, 4+4+n+2+palsz*3);
+			goto err;
+		}
+	}
 
-	if (header->identification[0] != 'W'
-	|| header->identification[1] != 'A'
-	|| header->identification[2] != 'D'
-	|| header->identification[3] != '2')
-		fatal ("Wad file %s doesn't have WAD2 id\n",filename);
+	q = Cache_Alloc(c, sizeof(*q) + n*sizeof(pixel_t));
+	q->width = w;
+	q->height = h;
 
-	wad_numlumps = LittleLong(header->numlumps);
-	infotableofs = LittleLong(header->infotableofs);
-	wad_lumps = (lumpinfo_t *)(wad_base + infotableofs);
-
-	for (i=0, lump_p = wad_lumps ; i<wad_numlumps ; i++,lump_p++)
-	{
-		lump_p->filepos = LittleLong(lump_p->filepos);
-		lump_p->size = LittleLong(lump_p->size);
-		W_CleanupName (lump_p->name, lump_p->name);
-		if (lump_p->type == TYP_QPIC)
-			SwapPic ( (qpic_t *)(wad_base + lump_p->filepos));
+	if(wad->ver == WAD_VER2){
+		for(i = 0; i < n; i++)
+			q->data[i] = q1pal[*p++];
+	}else if(wad->ver == WAD_VER3 && palsz > 0){
+		for(i = 0; i < n; i++){
+			j = (*p++)*3;
+			q->data[i] = j < palsz*3 ? (0xff<<24 | pal[j+0]<<16 | pal[j+1]<<8 | pal[j+2]) : 0;
+		}
 	}
+
+	return q;
+err:
+	werrstr("%.*s: %s", nelem(lmp->name), lmp->name, lerr());
+	return nil;
 }
 
-
-/*
-=============
-W_GetLumpinfo
-=============
-*/
-lumpinfo_t	*W_GetLumpinfo (char *name)
+static int
+W_ReadPixelsAt(Wad *wad, int off, int sz, pixel_t *out, int num)
 {
-	int		i;
-	lumpinfo_t	*lump_p;
-	char	clean[16];
+	int n, palsz, x;
+	byte *t, *pal;
 
-	W_CleanupName (name, clean);
-
-	for (lump_p=wad_lumps, i=0 ; i<wad_numlumps ; i++,lump_p++)
-	{
-		if (!strcmp(clean, lump_p->name))
-			return lump_p;
+	num = min(num, sz);
+	num = min(num, wad->sz-off);
+	t = wad->in + off;
+	if(wad->ver == WAD_VER2){
+		for(n = 0; n < num; n++)
+			*out++ = q1pal[*t++];
+	}else if(wad->ver == WAD_VER3){
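+		/* WAD3: pixels are followed by a 16-bit palette size and palsz RGB triples */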
+		if(off+num+2 > wad->sz){
+			werrstr("invalid lump: %d > %d", off+num+2, wad->sz);
+			return -1;
+		}
+		pal = t + num;
+		palsz = le16(pal);
+		if(palsz < 0 || palsz > 256 || off+num+2+palsz*3 > wad->sz){
+			werrstr("invalid palette: palsz=%d pal_end=%d wad_sz=%d",
+				palsz, off+num+2+palsz*3, wad->sz);
+			goto err;
+		}
+		for(n = 0; n < num; n++){
+			x = (*t++)*3;
+			*out++ = x < palsz*3 ? (0xff<<24 | pal[x+0]<<16 | pal[x+1]<<8 | pal[x+2]) : 0;
+		}
 	}
-
-	fatal ("W_GetLumpinfo: %s not found", name);
+	return num;
+err:
+	return -1;
 }
 
-void *W_GetLumpName (char *name)
+int
+W_ReadMipTex(Wad *wad, char *name, texture_t *t)
 {
-	lumpinfo_t	*lump;
+	Lmp *lmp;
+	byte *p;
+	int i, w, h, n, off;
 
-	lump = W_GetLumpinfo (name);
-
-	return (void *)(wad_base + lump->filepos);
+	if((lmp = W_FindName(wad, name)) == nil || lmp->type != TYP_MIPTEX)
+		return -1;
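+	/* skip the 16-byte name field at the start of the miptex header */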
+	p = wad->in + lmp->off + 16;
+	w = le32(p);
+	h = le32(p);
+	if(w != t->width || h != t->height){
+		werrstr("%s: size mismatch: (%d->%d)x(%d->%d)\n", name, w, t->width, h, t->height);
+		return -1;
+	}
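+	/* total pixels over the 4 mip levels: n + n/4 + n/16 + n/64 = n*85/64 */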
+	n = w*h*85/64;
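+	/* on-disk mip offsets are relative to the lump start; rebase them past
+	 * the 40-byte header (16 name + 2*4 dims + 4*4 offsets) and scale from
+	 * byte-sized palette indices to pixel_t */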
+	for(i = 0; i < nelem(t->offsets); i++)
+		t->offsets[i] = sizeof(texture_t) + (le32(p) - (16+2*4+4*4))*sizeof(pixel_t);
+	off = p - wad->in;
+	if((n = W_ReadPixelsAt(wad, off, lmp->off+lmp->sz-off, (pixel_t*)(t+1), n)) < 0)
+		werrstr("%s: %s", name, lerr());
+	return n;
 }
 
-/*
-=============================================================================
+int
+W_ReadPixels(Wad *wad, char *name, pixel_t *out, int num)
+{
+	Lmp *lmp;
+	int n;
 
-automatic byte swapping
+	if((lmp = W_FindName(wad, name)) == nil)
+		return -1;
+	if((n = W_ReadPixelsAt(wad, lmp->off, lmp->sz, out, num)) < 0)
+		werrstr("%s: %s", name, lerr());
+	return n;
+}
 
-=============================================================================
-*/
-
-void SwapPic (qpic_t *pic)
+int
+W_ReadRaw(Wad *wad, char *name, byte *out, int num)
 {
-	pic->width = LittleLong(pic->width);
-	pic->height = LittleLong(pic->height);
+	Lmp *lmp;
+
+	if((lmp = W_FindName(wad, name)) == nil)
+		return -1;
+	num = min(num, lmp->sz);
+	num = min(num, wad->sz-lmp->off);
+	memmove(out, wad->in+lmp->off, num);
+	return num;
 }
--- a/wad.h
+++ b/wad.h
@@ -1,47 +1,17 @@
-//===============
-//   TYPES
-//===============
+typedef struct Wad Wad;
+#pragma incomplete Wad
 
-#define	CMP_NONE		0
-#define	CMP_LZSS		1
-
-#define	TYP_NONE		0
-#define	TYP_LABEL		1
-
-#define	TYP_LUMPY		64				// 64 + grab command number
-#define	TYP_PALETTE		64
-#define	TYP_QTEX		65
-#define	TYP_QPIC		66
-#define	TYP_SOUND		67
-#define	TYP_MIPTEX		68
-
 typedef struct
 {
-	int width, height;
-	pixel_t data[]; // variably sized
-} qpic_t;
+	int width;
+	int height;
+	pixel_t data[];
+}qpic_t;
 
-typedef struct
-{
-	char		identification[4];		// should be WAD2 or 2DAW
-	int			numlumps;
-	int			infotableofs;
-} wadinfo_t;
+struct texture_s;
 
-typedef struct
-{
-	int			filepos;
-	int			disksize;
-	int			size;					// uncompressed
-	char		type;
-	char		compression;
-	char		pad1, pad2;
-	char		name[16];				// must be null terminated
-} lumpinfo_t;
-
-void	W_LoadWadFile (char *filename);
-void	W_CleanupName (char *in, char *out);
-lumpinfo_t	*W_GetLumpinfo (char *name);
-void	*W_GetLumpName (char *name);
-
-void SwapPic (qpic_t *pic);
+Wad *W_OpenWad(char *path);
+qpic_t *W_ReadQpic(Wad *w, char *name, mem_user_t *c);
+int W_ReadMipTex(Wad *wad, char *name, struct texture_s *t);
+int W_ReadPixels(Wad *w, char *name, pixel_t *out, int num);
+int W_ReadRaw(Wad *w, char *name, byte *out, int num);
--- a/zone.c
+++ b/zone.c
@@ -78,6 +78,7 @@
 {
 	mem_t *m, *n;
 
+	assert(p != nil);
 	m = p;
 	m--;
 	n = realloc(m, sizeof(*m) + m->size*2);