Downloads containing ctio-faster.js

Downloads
Name Author Game Mode Rating
WebJCS 1.3.3 (Featured Download)   djazz   Utility   10   Download file

File preview

/*
 * rm - Feb 2011
 * ctio-faster.js:
 * This file is a modification of ctio.js that removes all the runtime sanity
 * checking for efficiency.
 *
 * A simple way to read and write simple ctypes. Of course, as you'll find the
 * code isn't as simple as it might appear. The following types are currently
 * supported in big and little endian formats:
 *
 * 	uint8_t			int8_t
 * 	uint16_t		int16_t
 * 	uint32_t		int32_t
 *	float (single precision IEEE 754)
 *	double (double precision IEEE 754)
 *
 * This is designed to work in Node and v8. It may in fact work in other
 * Javascript interpreters (that'd be pretty neat), but it hasn't been tested.
 * If you find that it does in fact work, that's pretty cool. Try and pass word
 * back to the original author.
 *
 * Note to the reader: If your tabstop isn't set to 8, parts of this may look
 * weird.
 */

/*
 * Numbers in Javascript have a secret: all numbers must be represented by an
 * IEEE-754 double. The double has a 52-bit mantissa with an implicit one.
 * Thus the range of integers that can be represented exactly is limited by
 * the size of the mantissa, which makes reading and writing 64-bit integers
 * difficult, but far from impossible.
 *
 * Another side effect of this representation is what happens when you use the
 * bitwise operators, i.e. shift left, shift right, and, or, etc. In Javascript,
 * each operand and the result is cast to a signed 32-bit number. However, in
 * the case of >>> the values are cast to an unsigned number.
 */
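
/*
 * An illustrative example of those casts (added here; not part of the
 * original ctio.js):
 *
 *	0xffffffff | 0		evaluates to -1, since the operands are cast
 *				to signed 32-bit values
 *	0xffffffff >>> 0	evaluates to 4294967295, since >>> produces an
 *				unsigned result
 */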

/*
 * A reminder on endian related issues:
 *
 * Big Endian: MSB -> First byte
 * Little Endian: MSB -> Last byte
 */

/*
 * An 8 bit unsigned integer involves doing no significant work.
 */
function ruint8(buffer, endian, offset)
{
	return (buffer[offset]);
}

/*
 * For 16 bit unsigned numbers we can do all the casting that we want to do.
 */
function ruint16(buffer, endian, offset)
{
	var val = 0;
	if (endian == 'big') {
		val = buffer[offset] << 8;
		val |=  buffer[offset+1];
	} else {
		val = buffer[offset];
		val |= buffer[offset+1] << 8;
	}

	return (val);
}
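
/*
 * For illustration (not part of the original ctio.js), reading the same two
 * bytes in each byte order; a plain array of byte values serves as the buffer:
 *
 *	ruint16([0x12, 0x34], 'big', 0)		returns 0x1234 (4660)
 *	ruint16([0x12, 0x34], 'little', 0)	returns 0x3412 (13330)
 */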

/*
 * Because most bitshifting is done using signed 32-bit numbers, if we go into
 * the realm where we use that 32nd bit, we'll end up in the negative range,
 * i.e.:
 * > 200 << 24
 * -939524096
 *
 * Not the value you'd expect. To work around this, we instead do a
 * multiplication by (1 << 24) which does the same thing, but in a way that
 * ensures we don't lose that bit.
 */

/*
 * Handle the case of losing our MSBit
 */
function fixu32(upper, lower)
{
	return ((upper * (1 << 24)) + lower);
}

function ruint32(buffer, endian, offset)
{
	var val = 0;

	if (endian == 'big') {
		val = buffer[offset+1] << 16;
		val |= buffer[offset+2] << 8;
		val |= buffer[offset+3];
		val = fixu32(buffer[offset], val);
	} else {
		val = buffer[offset+2] << 16;
		val |= buffer[offset+1] << 8;
		val |= buffer[offset];
		val = fixu32(buffer[offset+3], val);
	}

	return (val);
}
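
/*
 * For illustration (not part of the original ctio.js), a value with its most
 * significant bit set still comes back positive thanks to fixu32:
 *
 *	ruint32([0xde, 0xad, 0xbe, 0xef], 'big', 0)
 *		returns 3735928559 (0xdeadbeef), not -559038737
 */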

/*
 * Reads a 64-bit unsigned number. The astute observer will note that this
 * doesn't quite work. Javascript has chosen to only have numbers that can be
 * represented by a double. A double only has 52 bits of mantissa with an
 * implicit 1, thus we have up to 53 bits to represent an integer. However, 2^53
 * doesn't quite give us what we want. Isn't 53 bits enough for anyone? What
 * could you have possibly wanted to represent that was larger than that? Oh,
 * maybe a size? You mean we bypassed the 4 GB limit on file sizes, when did
 * that happen?
 *
 * To get around this egregious language issue, we're going to instead construct
 * an array of two 32-bit unsigned integers, where arr[0] << 32 + arr[1] would
 * give the actual number. Note, however, that the expression above is only
 * notation: it won't actually produce the desired result in Javascript, because
 * shifts operate on 32 bits and the full value may not fit in a double anyway.
 */
function ruint64(buffer, endian, offset)
{
	var val = new Array(2);

	if (endian == 'big') {
		val[0] = ruint32(buffer, endian, offset);
		val[1] = ruint32(buffer, endian, offset+4);
	} else {
		val[0] = ruint32(buffer, endian, offset+4);
		val[1] = ruint32(buffer, endian, offset);
	}

	return (val);
}
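
/*
 * For illustration (not part of the original ctio.js), the two-element result
 * and how one would (approximately) combine it:
 *
 *	ruint64([0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00], 'big', 0)
 *		returns [1, 0]
 *	1 * Math.pow(2, 32) + 0		gives 4294967296; this is only exact
 *					while the full value fits in 53 bits
 */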


/*
 * Signed integer types, yay team! A reminder on how two's complement actually
 * works. The first bit is the sign bit, i.e. it tells us whether the number
 * should be positive or negative. If the number is positive, you're pretty much
 * done: the value is the same as the unsigned representation, so you can just
 * leverage the unsigned translations and return those. Unfortunately, negative
 * numbers aren't quite that straightforward.
 *
 * At first glance, one might be inclined to use the traditional formula to
 * translate binary numbers between the positive and negative values in two's
 * complement. (Though it doesn't quite work for the most negative value)
 * Mainly:
 *  - invert all the bits
 *  - add one to the result
 *
 * Of course, this doesn't quite work in Javascript. Take for example the value
 * of -128. This could be represented in 16 bits (big-endian) as 0xff80. But of
 * course, Javascript will do the following:
 *
 * > ~0xff80
 * -65409
 *
 * Whoa there, Javascript, that's not quite right. But wait, according to
 * Javascript that's perfectly correct. When Javascript sees the constant
 * 0xff80, it has no notion that it is actually a signed number; it treats it
 * as the unsigned value 0xff80 (positive 65408). Performing the bitwise
 * negation on that gives -65409, not the -128 we were after.
 *
 * Instead, we're going to have to use the following general formula, which
 * works in a rather Javascript-friendly way. I'm glad we don't support this
 * kind of weird numbering scheme in the kernel.
 *
 * (BIT-MAX - (unsigned)val + 1) * -1
 *
 * The astute observer may note that this isn't strictly necessary for 8-bit
 * numbers, where the traditional approach would work fine. However, once you
 * get to 16-bit numbers, you need it. Let's go back to our prior example and
 * see how this looks:
 *
 * (0xffff - 0xff80 + 1) * -1
 * (0x007f + 1) * -1
 * (0x0080) * -1
 *
 * Doing it this way ends up allowing us to treat it appropriately in
 * Javascript. Sigh, that's really quite ugly for what should just be a few bit
 * shifts, ~ and &.
 */

/*
 * Endianness doesn't matter for 8-bit signed values. We could in fact optimize
 * this case because the more traditional methods work, but for consistency,
 * we'll keep doing this the same way.
 */
function rsint8(buffer, endian, offset)
{
	var neg;

	neg = buffer[offset] & 0x80;
	if (!neg)
		return (buffer[offset]);

	return ((0xff - buffer[offset] + 1) * -1);
}

/*
 * The 16-bit version requires a bit more effort. In this case, we can leverage
 * our unsigned code to generate the value we want to return.
 */
function rsint16(buffer, endian, offset)
{
	var neg, val;

	val = ruint16(buffer, endian, offset);
	neg = val & 0x8000;
	if (!neg)
		return (val);

	return ((0xffff - val + 1) * -1);
}
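
/*
 * For illustration (not part of the original ctio.js), the 0xff80 example
 * from above decodes to -128 as desired:
 *
 *	rsint16([0xff, 0x80], 'big', 0)		returns -128
 *	rsint16([0x7f, 0xff], 'big', 0)		returns 32767; positive values
 *						pass straight through
 */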

/*
 * Arguably we shouldn't leverage our unsigned 32-bit code here and should
 * instead utilize the fact that, since these are signed numbers, we could do
 * all the shifting and binary anding to generate the 32-bit number directly.
 * But, for consistency, we'll do the same as the other routines. If we want to
 * do otherwise, we should instead make the 32-bit unsigned code do the
 * optimization. As long as there aren't floats secretly under the hood for
 * that, we /should/ be okay.
 */
function rsint32(buffer, endian, offset)
{
	var neg, val;

	val = ruint32(buffer, endian, offset);
	neg = val & 0x80000000;
	if (!neg)
		return (val);

	return ((0xffffffff - val + 1) * -1);
}

/*
 * The signed version of this code suffers from all of the same problems as the
 * unsigned 64-bit version.
 */
function rsint64(buffer, endian, offset)
{
	var neg, val;

	val = ruint64(buffer, endian, offset);
	neg = val[0] & 0x80000000;

	if (!neg)
		return (val);

	val[0] = (0xffffffff - val[0]) * -1;
	val[1] = (0xffffffff - val[1] + 1) * -1;

	return (val);
}

/*
 * We now move onto IEEE 754: The traditional form for floating point numbers
 * and what is secretly hiding at the heart of everything in this. I really hope
 * that someone is actually using this, as otherwise this effort will probably
 * have been wasted.
 *
 * One might be tempted to use parseFloat here, but that wouldn't work at all
 * for several reasons. Mostly because of the way floats actually work:
 * parseFloat only operates on base 10 strings, and I don't see base 10
 * anywhere near this file.
 *
 * In this case we'll implement the single and double precision versions. The
 * quadruple precision version, while probably useful, wouldn't really be
 * representable in Javascript, so let's not even waste our time.
 *
 * So let's review what this format looks like. A single precision value is 32
 * bits and has three parts:
 *   -  Sign bit
 *   -  Exponent (Using bias notation)
 *   -  Mantissa
 *
 * |s|eeeeeeee|mmmmmmmmmmmmmmmmmmmmmmm|
 * 31| 30-23  |         22 - 0        |
 *
 * The exponent is stored in biased form. The bias in this case is 127.
 * Therefore, our exponent is equal to the 8-bit value - 127.
 *
 * By default, a number in IEEE is normalized, which means the mantissa has an
 * implicit one that we don't see. So really the value stored is 1.m. However,
 * if the exponent is all zeros, the number is denormalized: there is no
 * implicit one, and the exponent is bumped up by one to compensate.
 *
 * Special values:
 *  - Positive Infinity:
 *	Sign:		0
 *	Exponent: 	All 1s
 *	Mantissa:	0
 *  - Negative Infinity:
 *	Sign:		1
 *	Exponent: 	All 1s
 *	Mantissa:	0
 *  - NaN:
 *	Sign:		*
 *	Exponent: 	All 1s
 *	Mantissa:	non-zero
 *  - Zero:
 *	Sign:		*
 *	Exponent:	All 0s
 *	Mantissa:	0
 *
 * In the case of zero, the sign bit determines whether we get a positive or
 * negative zero. However, since Javascript treats the two as equal (-0 == 0),
 * we just always return 0.
 *
 */
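
/*
 * A worked example of that layout (added for illustration; not from the
 * original ctio.js): the big-endian bytes c1 48 00 00 have sign 1, a biased
 * exponent of 0x82 (130, i.e. an exponent of 3) and a mantissa field of
 * 0x480000. That gives (1 + 0x480000 / 2^23) * 2^3 = 1.5625 * 8 = 12.5,
 * negated by the sign bit: -12.5.
 */
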
function rfloat(buffer, endian, offset)
{
	var bytes = [];
	var sign, exponent, mantissa, val;
	var bias = 127;
	var maxexp = 0xff;

	/* Normalize the bytes to be in endian order */
	if (endian == 'big') {
		bytes[0] = buffer[offset];
		bytes[1] = buffer[offset+1];
		bytes[2] = buffer[offset+2];
		bytes[3] = buffer[offset+3];
	} else {
		bytes[3] = buffer[offset];
		bytes[2] = buffer[offset+1];
		bytes[1] = buffer[offset+2];
		bytes[0] = buffer[offset+3];
	}

	sign = bytes[0] & 0x80;
	exponent = (bytes[0] & 0x7f) << 1;
	exponent |= (bytes[1] & 0x80) >>> 7;
	mantissa = (bytes[1] & 0x7f) << 16;
	mantissa |= bytes[2] << 8;
	mantissa |= bytes[3];

	/* Check for special cases before we do general parsing */
	if (!sign && exponent == maxexp && mantissa === 0)
		return (Number.POSITIVE_INFINITY);

	if (sign && exponent == maxexp && mantissa === 0)
		return (Number.NEGATIVE_INFINITY);

	if (exponent == maxexp && mantissa !== 0)
		return (Number.NaN);

	/*
	 * Javascript really doesn't have support for positive or negative zero.
	 * So we're not going to try and give it to you. That would be just
	 * plain weird. Besides -0 == 0.
	 */
	if (exponent === 0 && mantissa === 0)
		return (0);

	/*
	 * Now we can deal with the bias and then determine whether the mantissa
	 * has the implicit one or not.
	 */
	exponent -= bias;
	if (exponent == -bias) {
		exponent++;
		val = 0;
	} else {
		val = 1;
	}

	val = (val + mantissa * Math.pow(2, -23)) * Math.pow(2, exponent);

	if (sign)
		val *= -1;

	return (val);
}
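
/*
 * For illustration (not part of the original ctio.js):
 *
 *	rfloat([0x3f, 0x80, 0x00, 0x00], 'big', 0)	returns 1
 *	rfloat([0xc1, 0x48, 0x00, 0x00], 'big', 0)	returns -12.5, as worked
 *							out above
 *	rfloat([0x7f, 0x80, 0x00, 0x00], 'big', 0)	returns Infinity
 */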

/*
 * Doubles in IEEE 754 are like their brothers except for a few changes and
 * increases in size:
 *   - The exponent is now 11 bits
 *   - The mantissa is now 52 bits
 *   - The bias is now 1023
 *
 * |s|eeeeeeeeeee|mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm|
 * 63|  62 - 52  |                     51 - 0                          |
 *
 * While the size has increased a fair amount, we're going to end up keeping the
 * same general formula for calculating the final value. As a reminder, this
 * formula is:
 *
 * (-1)^s * (n + m) * 2^(e-b)
 *
 * Where:
 *	s	is the sign bit
 *	n	is (exponent > 0) ? 1 : 0 -- Determines whether we're normalized
 *					     or not
 *	m	is the mantissa
 *	e	is the exponent specified
 *	b	is the bias for the exponent
 *
 */
function rdouble(buffer, endian, offset)
{
	var bytes = [];
	var sign, exponent, mantissa, val, lowmant;
	var bias = 1023;
	var maxexp = 0x7ff;

	/* Normalize the bytes to be in endian order */
	if (endian == 'big') {
		bytes[0] = buffer[offset];
		bytes[1] = buffer[offset+1];
		bytes[2] = buffer[offset+2];
		bytes[3] = buffer[offset+3];
		bytes[4] = buffer[offset+4];
		bytes[5] = buffer[offset+5];
		bytes[6] = buffer[offset+6];
		bytes[7] = buffer[offset+7];
	} else {
		bytes[7] = buffer[offset];
		bytes[6] = buffer[offset+1];
		bytes[5] = buffer[offset+2];
		bytes[4] = buffer[offset+3];
		bytes[3] = buffer[offset+4];
		bytes[2] = buffer[offset+5];
		bytes[1] = buffer[offset+6];
		bytes[0] = buffer[offset+7];
	}

	/*
	 * We can construct the exponent and mantissa the same way as we did in
	 * the case of a float, just increase the range of the exponent.
	 */
	sign = bytes[0] & 0x80;
	exponent = (bytes[0] & 0x7f) << 4;
	exponent |= (bytes[1] & 0xf0) >>> 4;

	/*
	 * This is going to be ugly, but then again, we're dealing with IEEE
	 * 754. This could probably be done as a node add-on in a few lines of
	 * C++, but oh well, we've made it this far, so let's stay native the
	 * rest of the way...
	 *
	 * What we're going to do is break the mantissa into two parts, the
	 * lower 24 bits and the upper 28 bits. We'll multiply the upper 28 bits
	 * by the appropriate power and then add in the lower 24 bits. Not
	 * really that great. It's pretty much a giant kludge to deal with
	 * Javascript eccentricities around numbers.
	 */
	lowmant = bytes[7];
	lowmant |= bytes[6] << 8;
	lowmant |= bytes[5] << 16;
	mantissa = bytes[4];
	mantissa |= bytes[3] << 8;
	mantissa |= bytes[2] << 16;
	mantissa |= (bytes[1] & 0x0f) << 24;
	mantissa *= Math.pow(2, 24); /* Equivalent to << 24, but JS compat */
	mantissa += lowmant;

	/* Check for special cases before we do general parsing */
	if (!sign && exponent == maxexp && mantissa === 0)
		return (Number.POSITIVE_INFINITY);

	if (sign && exponent == maxexp && mantissa === 0)
		return (Number.NEGATIVE_INFINITY);

	if (exponent == maxexp && mantissa !== 0)
		return (Number.NaN);

	/*
	 * Javascript really doesn't have support for positive or negative zero.
	 * So we're not going to try and give it to you. That would be just
	 * plain weird. Besides -0 == 0.
	 */
	if (exponent === 0 && mantissa === 0)
		return (0);

	/*
	 * Now we can deal with the bias and then determine whether the mantissa
	 * has the implicit one or not.
	 */
	exponent -= bias;
	if (exponent == -bias) {
		exponent++;
		val = 0;
	} else {
		val = 1;
	}

	val = (val + mantissa * Math.pow(2, -52)) * Math.pow(2, exponent);

	if (sign)
		val *= -1;

	return (val);
}
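
/*
 * For illustration (not part of the original ctio.js), the IEEE 754 bytes of
 * pi come back as Math.PI exactly:
 *
 *	rdouble([0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18], 'big', 0)
 *		returns 3.141592653589793
 */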

/*
 * Now that we have gone through the pain of reading the individual types, we're
 * probably going to want some way to write these back. None of this is going to
 * be good, but since we have Javascript numbers it should certainly be
 * interesting. On this end, though, we can constrain a little bit more what
 * counts as a valid value. For now, let's go back to our friends, the unsigned
 * values.
 */

/*
 * Unsigned numbers seem deceptively easy. Here are the general steps and rules
 * that we are going to take:
 *   -  If the number is negative, throw an Error
 *   -  Truncate any floating point portion
 *   -  Take the modulus of the number in our base
 *   -  Write it out to the buffer in the endian format requested at the offset
 */

/*
 * We have to make sure that the value is a valid integer. This means that it is
 * non-negative, has no fractional component, and does not exceed the maximum
 * allowed value.
 *
 *	value		The number to check for validity
 *
 *	max		The maximum value
 */
function prepuint(value, max)
{
	if (typeof (value) != 'number')
		throw (new Error('cannot write a non-number as a number'));

	if (value < 0)
		throw (new Error('specified a negative value for writing an ' +
		    'unsigned value'));

	if (value > max)
		throw (new Error('value is larger than maximum value for ' +
		    'type'));

	if (Math.floor(value) !== value)
		throw (new Error('value has a fractional component'));

	return (value);
}

/*
 * 8-bit version, classy. We can ignore endianness which is good.
 */
function wuint8(value, endian, buffer, offset)
{
	var val;

	val = prepuint(value, 0xff);
	buffer[offset] = val;
}

/*
 * Pretty much the same as the 8-bit version, just this time we need to worry
 * about endian related issues.
 */
function wuint16(value, endian, buffer, offset)
{
	var val;

	val = prepuint(value, 0xffff);
	if (endian == 'big') {
		buffer[offset] = (val & 0xff00) >>> 8;
		buffer[offset+1] = val & 0x00ff;
	} else {
		buffer[offset+1] = (val & 0xff00) >>> 8;
		buffer[offset] = val & 0x00ff;
	}
}
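
/*
 * For illustration (not part of the original ctio.js), writing into a plain
 * four-element array shows the byte order handling; with b = [0, 0, 0, 0]:
 *
 *	wuint16(0x1234, 'big', b, 0)		b is now [0x12, 0x34, 0, 0]
 *	wuint16(0x1234, 'little', b, 2)		b is now [0x12, 0x34, 0x34, 0x12]
 */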

/*
 * The 32-bit version is going to have to be a little different unfortunately.
 * We can't quite bitshift to get the largest byte, because that would end up
 * getting us caught by the signed values.
 *
 * And yes, we do want to subtract out the lower part by default. This means
 * that when we do the division, it will be treated as a bit shift and we won't
 * end up generating a floating point value. If we did generate a floating point
 * value we'd have to truncate it intelligently, this saves us that problem and
 * may even be somewhat faster under the hood.
 */
function wuint32(value, endian, buffer, offset)
{
	var val;

	val = prepuint(value, 0xffffffff);
	if (endian == 'big') {
		buffer[offset] = (val - (val & 0x00ffffff)) / Math.pow(2, 24);
		buffer[offset+1] = (val >>> 16) & 0xff;
		buffer[offset+2] = (val >>> 8) & 0xff;
		buffer[offset+3] = val & 0xff;
	} else {
		buffer[offset+3] = (val - (val & 0x00ffffff)) /
		    Math.pow(2, 24);
		buffer[offset+2] = (val >>> 16) & 0xff;
		buffer[offset+1] = (val >>> 8) & 0xff;
		buffer[offset] = val & 0xff;
	}
}
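
/*
 * For illustration (not part of the original ctio.js), a value with the top
 * bit set still produces the right leading byte thanks to the divide trick;
 * with b = [0, 0, 0, 0]:
 *
 *	wuint32(0xdeadbeef, 'big', b, 0)	b is now [0xde, 0xad, 0xbe, 0xef]
 */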

/*
 * Unlike the other versions, we expect the value to be in the form of an array
 * of two 32-bit values, where value[0] << 32 + value[1] would (notionally)
 * result in the value that we want.
 */
function wuint64(value, endian, buffer, offset)
{
	prepuint(value[0], 0xffffffff);
	prepuint(value[1], 0xffffffff);

	if (endian == 'big') {
		wuint32(value[0], endian, buffer, offset);
		wuint32(value[1], endian, buffer, offset+4);
	} else {
		wuint32(value[0], endian, buffer, offset+4);
		wuint32(value[1], endian, buffer, offset);
	}
}
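
/*
 * For illustration (not part of the original ctio.js), writing the pair
 * [1, 0], i.e. 2^32, mirrors the ruint64 example above; with b an array of
 * eight zeros:
 *
 *	wuint64([0x1, 0x0], 'big', b, 0)	b is now [0, 0, 0, 1, 0, 0, 0, 0]
 */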

/*
 * We now move onto our friends in the signed number category. Unlike unsigned
 * numbers, we're going to have to worry a bit more about how we put values into
 * arrays. Since we are only worrying about signed 32-bit values, we're in
 * slightly better shape. Unfortunately, we really can't do our favorite binary
 * & in this system. It really seems to do the wrong thing. For example:
 *
 * > -32 & 0xff
 * 224
 *
 * What's happening above is really: 0xe0 & 0xff = 0xe0. However, the result of
 * this isn't treated as a signed number, which is ultimately a bad thing.
 *
 * What we're going to want to do is basically create the unsigned equivalent of
 * our representation and pass that off to the wuint* functions. To do that
 * we're going to do the following:
 *
 *  - if the value is positive
 *	we can pass it directly off to the equivalent wuint
 *  - if the value is negative
 *	we do the following computation:
 *	mb + val + 1, where
 *	mb	is the maximum unsigned value in that byte size
 *	val	is the Javascript negative integer
 *
 *
 * As a concrete example, take -128. In signed 16 bits this would be 0xff80. If
 * you do out the computations:
 *
 * 0xffff - 128 + 1
 * 0xffff - 127
 * 0xff80
 *
 * You can then hand this value off to the unsigned writers to encode. This is
 * really rather hacky, but it should work and get the job done, which is our
 * goal here.
 *
 * Thus the overall flow is:
 *   -  Truncate the floating point part of the number
 *   -  We don't have to take the modulus, because the unsigned versions will
 *   	take care of that for us. And we don't have to worry about that
 *   	potentially causing bad things to happen because of sign extension
 *   -  Pass it off to the appropriate unsigned version, potentially modifying
 *	the negative portions as necessary.
 */

/*
 * A series of checks to make sure we actually have a signed 32-bit number
 */
function prepsint(value, max, min)
{
	if (typeof (value) != 'number')
		throw (new Error('cannot write a non-number as a number'));

	if (value > max)
		throw (new Error('value larger than maximum allowed value'));

	if (value < min)
		throw (new Error('value smaller than minimum allowed value'));

	if (Math.floor(value) !== value)
		throw (new Error('value has a fractional component'));

	return (value);
}

/*
 * The 8-bit version of the signed value. Overall, fairly straightforward.
 */
function wsint8(value, endian, buffer, offset)
{
	var val;

	val = prepsint(value, 0x7f, -0x80);
	if (val >= 0)
		wuint8(val, endian, buffer, offset);
	else
		wuint8(0xff + val + 1, endian, buffer, offset);
}

/*
 * The 16-bit version of the signed value. Also, fairly straightforward.
 */
function wsint16(value, endian, buffer, offset)
{
	var val;

	val = prepsint(value, 0x7fff, -0x8000);
	if (val >= 0)
		wuint16(val, endian, buffer, offset);
	else
		wuint16(0xffff + val + 1, endian, buffer, offset);

}
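
/*
 * For illustration (not part of the original ctio.js), -128 comes out as the
 * 0xff80 pattern discussed above; with b = [0, 0]:
 *
 *	wsint16(-128, 'big', b, 0)		b is now [0xff, 0x80]
 */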

/*
 * We can do this relatively easily by leveraging the code used for 32-bit
 * unsigned code.
 */
function wsint32(value, endian, buffer, offset)
{
	var val;

	val = prepsint(value, 0x7fffffff, -0x80000000);
	if (val >= 0)
		wuint32(val, endian, buffer, offset);
	else
		wuint32(0xffffffff + val + 1, endian, buffer, offset);
}

/*
 * The signed 64-bit integer should be in the same format as the one returned
 * when reading. Mainly, the value should be an array of two integers where
 * value[0] << 32 + value[1] is (notionally) the desired number.
 */
function wsint64(value, endian, buffer, offset)
{
	var vals = new Array(2);

	prepsint(value[0], 0x7fffffff, -0x80000000);
	prepsint(value[1], 0xffffffff, -0xffffffff);

	/* Fix negative numbers */
	if (value[0] < 0 || value[1] < 0) {
		vals[0] = 0xffffffff - Math.abs(value[0]);
		vals[1] = 0x100000000 - Math.abs(value[1]);
		if (vals[1] == 0x100000000) {
			vals[1] = 0;
			vals[0]++;
		}
	} else {
		vals[0] = value[0];
		vals[1] = value[1];
	}

	if (endian == 'big') {
		wuint32(vals[0], endian, buffer, offset);
		wuint32(vals[1], endian, buffer, offset+4);
	} else {
		wuint32(vals[0], endian, buffer, offset+4);
		wuint32(vals[1], endian, buffer, offset);
	}
}

/*
 * Now we are moving onto the weirder of these, the float and double. For this
 * we're going to just have to do something that's pretty weird. First off, we
 * have no way to get at the underlying float representation, at least not
 * easily. But that doesn't mean we can't figure it out, we just have to use our
 * heads.
 *
 * One might propose to use Number.toString(2). Of course, this is not really
 * that good, because the ECMAScript 262 v3 standard says the following in
 * Section 15.7.4.2, Number.prototype.toString (radix):
 *
 * If radix is an integer from 2 to 36, but not 10, the result is a string, the
 * choice of which is implementation-dependent.
 *
 * Well that doesn't really help us one bit now does it? We could use the
 * standard base 10 version of the string, but that's just going to create more
 * errors as we end up trying to convert it back to a binary value. So, really
 * this just means we have to be non-lazy and parse the structure intelligently.
 *
 * First off, we can do the basic checks: NaN, positive and negative infinity.
 *
 * Now that those are done we can work backwards to generate the mantissa and
 * exponent.
 *
 * The first thing we need to do is determine the sign bit, which is easy: just
 * check whether the value is less than 0, then convert the number to its
 * absolute value. Next, we need to determine whether the value is less than
 * one or greater than or equal to one, and from there what power of two gets
 * us there. What follows is now specific to floats; the general ideas hold for
 * doubles as well, but the exact numbers involved will change.
 *
 * Once we have that power we can determine the exponent and the mantissa. Call
 * the number of bits needed to reach that power ebits. In the general case the
 * fields have the following values:
 *
 *	exponent	127 + ebits
 *	mantissa	value * 2^(23 - ebits) & 0x7fffff
 *
 * In the case where the value of ebits is <= -127 we are now in the case where
 * we no longer have normalized numbers. In this case the values take on the
 * following values:
 *
 * 	exponent	0
 *	mantissa	value * 2^149 & 0x7fffff
 *
 * Once we have the values for the sign, mantissa, and exponent. We reconstruct
 * the four bytes as follows:
 *
 *	byte0		sign bit and seven most significant bits from the exp
 *			sign << 7 | (exponent & 0xfe) >>> 1
 *
 *	byte1		lsb from the exponent and 7 top bits from the mantissa
 *			(exponent & 0x01) << 7 | (mantissa & 0x7f0000) >>> 16
 *
 *	byte2		bits 8-15 (zero indexing) from mantissa
 *			(mantissa & 0xff00) >>> 8
 *
 *	byte3		bits 0-7 from mantissa
 *			mantissa & 0xff
 *
 * Once we have this we have to assign them into the buffer in proper endian
 * order.
 */
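
/*
 * A worked example of that recipe (added for illustration; not from the
 * original ctio.js): for 0.15625, ebits is floor(log2(0.15625)) = -3, so the
 * exponent field is 127 - 3 = 124 and the mantissa field is
 * (0.15625 * 2^(23 + 3)) & 0x7fffff = 0x200000. Packed into bytes, that gives
 * 3e 20 00 00.
 */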

/*
 * Compute the log base 2 of the value. Now, someone who remembers basic
 * properties of logarithms will point out that we could use the change of base
 * formula for logs, and in fact that would be astute, because that's what we'll
 * do for now. It feels cleaner, albeit possibly less efficient than just
 * iterating and dividing by 2. We may want to come back and revisit that some
 * day.
 */
function log2(value)
{
	return (Math.log(value) / Math.log(2));
}

/*
 * Helper to determine the exponent of the number we're looking at.
 */
function intexp(value)
{
	return (Math.floor(log2(value)));
}

/*
 * Helper to determine the exponent of the fractional part of the value.
 */
function fracexp(value)
{
	return (Math.floor(log2(value)));
}

function wfloat(value, endian, buffer, offset)
{
	var sign, exponent, mantissa, ebits;
	var bytes = [];

	if (isNaN(value)) {
		sign = 0;
		exponent = 0xff;
		mantissa = 23;
	} else if (value == Number.POSITIVE_INFINITY) {
		sign = 0;
		exponent = 0xff;
		mantissa = 0;
	} else if (value == Number.NEGATIVE_INFINITY) {
		sign = 1;
		exponent = 0xff;
		mantissa = 0;
	} else {
		/* Well we have some work to do */

		/* Thankfully the sign bit is trivial */
		if (value < 0) {
			sign = 1;
			value = Math.abs(value);
		} else {
			sign = 0;
		}

		/* Use the correct function to determine number of bits */
		if (value < 1)
			ebits = fracexp(value);
		else
			ebits = intexp(value);

		/* Time to deal with the issues surrounding normalization */
		if (ebits <= -127) {
			exponent = 0;
			mantissa = (value * Math.pow(2, 149)) & 0x7fffff;
		} else {
			exponent = 127 + ebits;
			mantissa = value * Math.pow(2, 23 - ebits);
			mantissa &= 0x7fffff;
		}
	}

	bytes[0] = sign << 7 | (exponent & 0xfe) >>> 1;
	bytes[1] = (exponent & 0x01) << 7 | (mantissa & 0x7f0000) >>> 16;
	bytes[2] = (mantissa & 0x00ff00) >>> 8;
	bytes[3] = mantissa & 0x0000ff;

	if (endian == 'big') {
		buffer[offset] = bytes[0];
		buffer[offset+1] = bytes[1];
		buffer[offset+2] = bytes[2];
		buffer[offset+3] = bytes[3];
	} else {
		buffer[offset] = bytes[3];
		buffer[offset+1] = bytes[2];
		buffer[offset+2] = bytes[1];
		buffer[offset+3] = bytes[0];
	}
}
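
/*
 * For illustration (not part of the original ctio.js); with b = [0, 0, 0, 0]:
 *
 *	wfloat(1.0, 'big', b, 0)	b is now [0x3f, 0x80, 0, 0]
 *	wfloat(-12.5, 'big', b, 0)	b is now [0xc1, 0x48, 0, 0]
 */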

/*
 * Now we move onto doubles. Doubles are similar to floats in pretty much all
 * ways except that the processing isn't quite as straightforward because we
 * can't always use shifting, i.e. we have > 32 bit values.
 *
 * We're going to proceed in an identical fashion to floats and utilize the same
 * helper functions. All that really is changing are the specific values that we
 * use to do the calculations. Thus, to review we have to do the following.
 *
 * First get the sign bit and convert the value to its absolute value
 * representation. Next, we determine the number of bits that we used to get to
 * the value, branching whether the value is greater than or less than 1. Once
 * we have that value which we will again call ebits, we have to do the
 * following in the general case:
 *
 *	exponent	1023 + ebits
 *	mantissa	[value * 2^(52 - ebits)] % 2^52
 *
 * In the case where the value of ebits <= -1023 we no longer use normalized
 * numbers, thus like with floats we have to do slightly different processing:
 *
 *	exponent	0
 *	mantissa	[value * 2^1074] % 2^52
 *
 * Once we have determined the sign, exponent and mantissa we can construct the
 * bytes as follows:
 *
 *	byte0		sign bit and seven most significant bits from the exp
 *			sign << 7 | (exponent & 0x7f0) >>> 4
 *
 *	byte1		Remaining 4 bits from the exponent and the four most
 *			significant bits from the mantissa 48-51
 *			(exponent & 0x00f) << 4 | mantissa >>> 48
 *
 *	byte2		Bits 40-47 from the mantissa
 *			(mantissa >>> 40) & 0xff
 *
 *	byte3		Bits 32-39 from the mantissa
 *			(mantissa >>> 32) & 0xff
 *
 *	byte4		Bits 24-31 from the mantissa
 *			(mantissa >>> 24) & 0xff
 *
 *	byte5		Bits 16-23 from the Mantissa
 *			(mantissa >>> 16) & 0xff
 *
 *	byte6		Bits 8-15 from the mantissa
 *			(mantissa >>> 8) & 0xff
 *
 *	byte7		Bits 0-7 from the mantissa
 *			mantissa & 0xff
 *
 * Now we can't quite do the right shifting that we want in bytes 1 - 3, because
 * the mantissa is wider than 32 bits, so those shifts would lose the high bits.
 * Instead we have to use an alternate approach. To try and stay out of inexact
 * floating point, what we'll do is subtract out the low 24 bits (bytes 5 - 7)
 * and then divide the mantissa by 2^24. What's left is only 28 bits wide, so
 * from there we can use ordinary binary arithmetic. Oof, that's ugly, but it
 * seems to avoid using floating point (just based on how v8 seems to be
 * optimizing for base 2 arithmetic).
 */
function wdouble(value, endian, buffer, offset)
{
	var sign, exponent, mantissa, ebits;
	var bytes = [];

	if (isNaN(value)) {
		sign = 0;
		exponent = 0x7ff;
		mantissa = 23;
	} else if (value == Number.POSITIVE_INFINITY) {
		sign = 0;
		exponent = 0x7ff;
		mantissa = 0;
	} else if (value == Number.NEGATIVE_INFINITY) {
		sign = 1;
		exponent = 0x7ff;
		mantissa = 0;
	} else {
		/* Well we have some work to do */

		/* Thankfully the sign bit is trivial */
		if (value < 0) {
			sign = 1;
			value = Math.abs(value);
		} else {
			sign = 0;
		}

		/* Use the correct function to determine number of bits */
		if (value < 1)
			ebits = fracexp(value);
		else
			ebits = intexp(value);

		/*
		 * This is a total hack to determine a denormalized value.
		 * Unfortunately, we sometimes do not get a proper value for
		 * ebits, i.e. we lose the values that would get rounded off.
		 *
		 *
		 * The astute observer may wonder why we would be
		 * multiplying by two Math.pows rather than just summing
		 * them. Well, that's to get around a small bug in the
		 * way v8 seems to implement the function. On occasion
		 * doing:
		 *
		 * foo * Math.pow(2, 1023 + 51)
		 *
		 * Causes us to overflow to infinity, whereas doing:
		 *
		 * foo * Math.pow(2, 1023) * Math.pow(2, 51)
		 *
		 * Does not cause us to overflow. Go figure.
		 *
		 */
		if (value <= 2.225073858507201e-308 || ebits <= -1023) {
			exponent = 0;
			mantissa = value * Math.pow(2, 1023) * Math.pow(2, 51);
			mantissa %= Math.pow(2, 52);
		} else {
			/*
			 * We might have gotten fucked by our floating point
			 * logarithm magic. This is rather crappy, but that's
			 * our luck. If we just had a log base 2 or access to
			 * the stupid underlying representation this would have
			 * been much easier and we wouldn't have such stupid
			 * kludges or hacks.
			 */
			if (ebits > 1023)
				ebits = 1023;
			exponent = 1023 + ebits;
			mantissa = value * Math.pow(2, -ebits);
			mantissa *= Math.pow(2, 52);
			mantissa %= Math.pow(2, 52);
		}
	}

	/* Fill the bytes in backwards to deal with the size issues */
	bytes[7] = mantissa & 0xff;
	bytes[6] = (mantissa >>> 8) & 0xff;
	bytes[5] = (mantissa >>> 16) & 0xff;
	mantissa = (mantissa - (mantissa & 0xffffff)) / Math.pow(2, 24);
	bytes[4] = mantissa & 0xff;
	bytes[3] = (mantissa >>> 8) & 0xff;
	bytes[2] = (mantissa >>> 16) & 0xff;
	bytes[1] = (exponent & 0x00f) << 4 | mantissa >>> 24;
	bytes[0] = (sign << 7) | (exponent & 0x7f0) >>> 4;

	if (endian == 'big') {
		buffer[offset] = bytes[0];
		buffer[offset+1] = bytes[1];
		buffer[offset+2] = bytes[2];
		buffer[offset+3] = bytes[3];
		buffer[offset+4] = bytes[4];
		buffer[offset+5] = bytes[5];
		buffer[offset+6] = bytes[6];
		buffer[offset+7] = bytes[7];
	} else {
		buffer[offset+7] = bytes[0];
		buffer[offset+6] = bytes[1];
		buffer[offset+5] = bytes[2];
		buffer[offset+4] = bytes[3];
		buffer[offset+3] = bytes[4];
		buffer[offset+2] = bytes[5];
		buffer[offset+1] = bytes[6];
		buffer[offset] = bytes[7];
	}
}
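
/*
 * For illustration (not part of the original ctio.js), a round trip through
 * the routines above reproduces the value exactly; with b an array of eight
 * zeros:
 *
 *	wdouble(Math.PI, 'big', b, 0)
 *		b is now [0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18]
 *	rdouble(b, 'big', 0)
 *		returns 3.141592653589793, i.e. Math.PI
 */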

/*
 * Actually export our work above. One might argue that we shouldn't expose
 * these interfaces and just force people to use the higher level abstractions
 * around this work. However, unlike, say, other libraries we've come across, this
 * interface has several properties: it makes sense, it's simple, and it's
 * useful.
 */
exports.ruint8 = ruint8;
exports.ruint16 = ruint16;
exports.ruint32 = ruint32;
exports.ruint64 = ruint64;
exports.wuint8 = wuint8;
exports.wuint16 = wuint16;
exports.wuint32 = wuint32;
exports.wuint64 = wuint64;

exports.rsint8 = rsint8;
exports.rsint16 = rsint16;
exports.rsint32 = rsint32;
exports.rsint64 = rsint64;
exports.wsint8 = wsint8;
exports.wsint16 = wsint16;
exports.wsint32 = wsint32;
exports.wsint64 = wsint64;

exports.rfloat = rfloat;
exports.rdouble = rdouble;
exports.wfloat = wfloat;
exports.wdouble = wdouble;