Date: Fri, 20 Jan 2012 06:16:37 +0000 (UTC)
From: David Schultz <das@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r230364 - in vendor/NetBSD/softfloat: . dist dist/bits32 dist/bits64 dist/templates
Message-ID: <201201200616.q0K6GbHo026916@svn.freebsd.org>
Author: das
Date: Fri Jan 20 06:16:37 2012
New Revision: 230364
URL: http://svn.freebsd.org/changeset/base/230364

Log:
  Import SoftFloat from NetBSD.

Added:
  vendor/NetBSD/softfloat/
  vendor/NetBSD/softfloat/dist/
  vendor/NetBSD/softfloat/dist/Makefile.inc   (contents, props changed)
  vendor/NetBSD/softfloat/dist/README.NetBSD
  vendor/NetBSD/softfloat/dist/README.txt   (contents, props changed)
  vendor/NetBSD/softfloat/dist/bits32/
  vendor/NetBSD/softfloat/dist/bits32/softfloat-macros
  vendor/NetBSD/softfloat/dist/bits32/softfloat.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/bits64/
  vendor/NetBSD/softfloat/dist/bits64/softfloat-macros
  vendor/NetBSD/softfloat/dist/bits64/softfloat.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/eqdf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/eqsf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/eqtf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpgetmask.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpgetround.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpgetsticky.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpsetmask.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpsetround.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/fpsetsticky.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gedf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gesf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/getf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gexf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gtdf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gtsf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gttf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/gtxf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/ledf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/lesf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/letf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/ltdf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/ltsf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/lttf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/nedf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/negdf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/negsf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/negtf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/negxf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/nesf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/netf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/nexf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/softfloat-for-gcc.h   (contents, props changed)
  vendor/NetBSD/softfloat/dist/softfloat-history.txt   (contents, props changed)
  vendor/NetBSD/softfloat/dist/softfloat-source.txt   (contents, props changed)
  vendor/NetBSD/softfloat/dist/softfloat-specialize
  vendor/NetBSD/softfloat/dist/softfloat.txt   (contents, props changed)
  vendor/NetBSD/softfloat/dist/templates/
  vendor/NetBSD/softfloat/dist/templates/milieu.h   (contents, props changed)
  vendor/NetBSD/softfloat/dist/templates/softfloat-specialize
  vendor/NetBSD/softfloat/dist/templates/softfloat.h   (contents, props changed)
  vendor/NetBSD/softfloat/dist/timesoftfloat.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/timesoftfloat.txt   (contents, props changed)
  vendor/NetBSD/softfloat/dist/unorddf2.c   (contents, props changed)
  vendor/NetBSD/softfloat/dist/unordsf2.c
(contents, props changed) Added: vendor/NetBSD/softfloat/dist/Makefile.inc ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ vendor/NetBSD/softfloat/dist/Makefile.inc Fri Jan 20 06:16:37 2012 (r230364) @@ -0,0 +1,28 @@ +# $NetBSD: Makefile.inc,v 1.10 2011/07/04 02:53:15 mrg Exp $ + +SOFTFLOAT_BITS?=64 +.PATH: ${ARCHDIR}/softfloat \ + ${.CURDIR}/softfloat/bits${SOFTFLOAT_BITS} ${.CURDIR}/softfloat + +CPPFLAGS+= -I${ARCHDIR}/softfloat -I${.CURDIR}/softfloat +CPPFLAGS+= -DSOFTFLOAT_FOR_GCC + +SRCS.softfloat= softfloat.c + +SRCS.softfloat+=fpgetround.c fpsetround.c fpgetmask.c fpsetmask.c \ + fpgetsticky.c fpsetsticky.c + +SRCS.softfloat+=eqsf2.c nesf2.c gtsf2.c gesf2.c ltsf2.c lesf2.c negsf2.c \ + eqdf2.c nedf2.c gtdf2.c gedf2.c ltdf2.c ledf2.c negdf2.c \ + eqtf2.c netf2.c gttf2.c getf2.c lttf2.c letf2.c negtf2.c \ + nexf2.c gtxf2.c gexf2.c negxf2.c unordsf2.c unorddf2.c + +SRCS+= ${SRCS.softfloat} + +# XXX +.if defined(HAVE_GCC) && ${HAVE_GCC} >= 45 && \ + (${MACHINE_CPU} == "arm" || \ + ${MACHINE_CPU} == "mips" || \ + ${MACHINE_CPU} == "sh3") +COPTS.softfloat.c+= -Wno-enum-compare +.endif Added: vendor/NetBSD/softfloat/dist/README.NetBSD ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ vendor/NetBSD/softfloat/dist/README.NetBSD Fri Jan 20 06:16:37 2012 (r230364) @@ -0,0 +1,8 @@ +$NetBSD: README.NetBSD,v 1.2 2002/05/21 23:51:05 bjh21 Exp $ + +This is a modified version of part of John Hauser's SoftFloat 2a package. +This version has been heavily modified to support its use with GCC to +implement built-in floating-point operations, but compiling +softfloat.c without SOFTFLOAT_FOR_GCC defined should get you the same +results as from the original. + Added: vendor/NetBSD/softfloat/dist/README.txt ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ vendor/NetBSD/softfloat/dist/README.txt Fri Jan 20 06:16:37 2012 (r230364) @@ -0,0 +1,39 @@ +$NetBSD: README.txt,v 1.1 2000/06/06 08:15:02 bjh21 Exp $ + +Package Overview for SoftFloat Release 2a + +John R. Hauser +1998 December 13 + + +SoftFloat is a software implementation of floating-point that conforms to +the IEC/IEEE Standard for Binary Floating-Point Arithmetic. SoftFloat is +distributed in the form of C source code. Compiling the SoftFloat sources +generates two things: + +-- A SoftFloat object file (typically `softfloat.o') containing the complete + set of IEC/IEEE floating-point routines. + +-- A `timesoftfloat' program for evaluating the speed of the SoftFloat + routines. (The SoftFloat module is linked into this program.) + +The SoftFloat package is documented in four text files: + + softfloat.txt Documentation for using the SoftFloat functions. + softfloat-source.txt Documentation for compiling SoftFloat. + softfloat-history.txt History of major changes to SoftFloat. + timesoftfloat.txt Documentation for using `timesoftfloat'. + +Other files in the package comprise the source code for SoftFloat. + +Please be aware that some work is involved in porting this software to other +targets. It is not just a matter of getting `make' to complete without +error messages. I would have written the code that way if I could, but +there are fundamental differences between systems that I can't make go away. 
+You should not attempt to compile SoftFloat without first reading both +`softfloat.txt' and `softfloat-source.txt'. + +At the time of this writing, the most up-to-date information about +SoftFloat and the latest release can be found at the Web page `http:// +HTTP.CS.Berkeley.EDU/~jhauser/arithmetic/SoftFloat.html'. + Added: vendor/NetBSD/softfloat/dist/bits32/softfloat-macros ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ vendor/NetBSD/softfloat/dist/bits32/softfloat-macros Fri Jan 20 06:16:37 2012 (r230364) @@ -0,0 +1,648 @@ + +/* +=============================================================================== + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* +------------------------------------------------------------------------------- +Shifts `a' right by the number of bits given in `count'. If any nonzero +bits are shifted off, they are ``jammed'' into the least significant bit of +the result by setting the least significant bit to 1. The value of `count' +can be arbitrarily large; in particular, if `count' is greater than 32, the +result will be either 0 or 1, depending on whether `a' is zero or nonzero. +The result is stored in the location pointed to by `zPtr'. +------------------------------------------------------------------------------- +*/ +INLINE void shift32RightJamming( bits32 a, int16 count, bits32 *zPtr ) +{ + bits32 z; + + if ( count == 0 ) { + z = a; + } + else if ( count < 32 ) { + z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 ); + } + else { + z = ( a != 0 ); + } + *zPtr = z; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the +number of bits given in `count'. Any bits shifted off are lost. The value +of `count' can be arbitrarily large; in particular, if `count' is greater +than 64, the result will be 0. The result is broken into two 32-bit pieces +which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. 
+------------------------------------------------------------------------------- +*/ +INLINE void + shift64Right( + bits32 a0, bits32 a1, int16 count, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + bits32 z0, z1; + int8 negCount = ( - count ) & 31; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 32 ) { + z1 = ( a0<<negCount ) | ( a1>>count ); + z0 = a0>>count; + } + else { + z1 = ( count < 64 ) ? ( a0>>( count & 31 ) ) : 0; + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the +number of bits given in `count'. If any nonzero bits are shifted off, they +are ``jammed'' into the least significant bit of the result by setting the +least significant bit to 1. The value of `count' can be arbitrarily large; +in particular, if `count' is greater than 64, the result will be either 0 +or 1, depending on whether the concatenation of `a0' and `a1' is zero or +nonzero. The result is broken into two 32-bit pieces which are stored at +the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shift64RightJamming( + bits32 a0, bits32 a1, int16 count, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + bits32 z0, z1; + int8 negCount = ( - count ) & 31; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 32 ) { + z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 ); + z0 = a0>>count; + } + else { + if ( count == 32 ) { + z1 = a0 | ( a1 != 0 ); + } + else if ( count < 64 ) { + z1 = ( a0>>( count & 31 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 ); + } + else { + z1 = ( ( a0 | a1 ) != 0 ); + } + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right +by 32 _plus_ the number of bits given in `count'. The shifted result is +at most 64 nonzero bits; these are broken into two 32-bit pieces which are +stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted +off form a third 32-bit result as follows: The _last_ bit shifted off is +the most-significant bit of the extra result, and the other 31 bits of the +extra result are all zero if and only if _all_but_the_last_ bits shifted off +were all zero. This extra result is stored in the location pointed to by +`z2Ptr'. The value of `count' can be arbitrarily large. + (This routine makes more sense if `a0', `a1', and `a2' are considered +to form a fixed-point value with binary point between `a1' and `a2'. This +fixed-point value is shifted right by the number of bits given in `count', +and the integer part of the result is returned at the locations pointed to +by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly +corrupted as described above, and is returned at the location pointed to by +`z2Ptr'.) 
+------------------------------------------------------------------------------- +*/ +INLINE void + shift64ExtraRightJamming( + bits32 a0, + bits32 a1, + bits32 a2, + int16 count, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr + ) +{ + bits32 z0, z1, z2; + int8 negCount = ( - count ) & 31; + + if ( count == 0 ) { + z2 = a2; + z1 = a1; + z0 = a0; + } + else { + if ( count < 32 ) { + z2 = a1<<negCount; + z1 = ( a0<<negCount ) | ( a1>>count ); + z0 = a0>>count; + } + else { + if ( count == 32 ) { + z2 = a1; + z1 = a0; + } + else { + a2 |= a1; + if ( count < 64 ) { + z2 = a0<<negCount; + z1 = a0>>( count & 31 ); + } + else { + z2 = ( count == 64 ) ? a0 : ( a0 != 0 ); + z1 = 0; + } + } + z0 = 0; + } + z2 |= ( a2 != 0 ); + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the +number of bits given in `count'. Any bits shifted off are lost. The value +of `count' must be less than 32. The result is broken into two 32-bit +pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shortShift64Left( + bits32 a0, bits32 a1, int16 count, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + + *z1Ptr = a1<<count; + *z0Ptr = + ( count == 0 ) ? a0 : ( a0<<count ) | ( a1>>( ( - count ) & 31 ) ); + +} + +/* +------------------------------------------------------------------------------- +Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' left +by the number of bits given in `count'. Any bits shifted off are lost. +The value of `count' must be less than 32. The result is broken into three +32-bit pieces which are stored at the locations pointed to by `z0Ptr', +`z1Ptr', and `z2Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shortShift96Left( + bits32 a0, + bits32 a1, + bits32 a2, + int16 count, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr + ) +{ + bits32 z0, z1, z2; + int8 negCount; + + z2 = a2<<count; + z1 = a1<<count; + z0 = a0<<count; + if ( 0 < count ) { + negCount = ( ( - count ) & 31 ); + z1 |= a2>>negCount; + z0 |= a1>>negCount; + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit +value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so +any carry out is lost. The result is broken into two 32-bit pieces which +are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + add64( + bits32 a0, bits32 a1, bits32 b0, bits32 b1, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + bits32 z1; + + z1 = a1 + b1; + *z1Ptr = z1; + *z0Ptr = a0 + b0 + ( z1 < a1 ); + +} + +/* +------------------------------------------------------------------------------- +Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the +96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is +modulo 2^96, so any carry out is lost. The result is broken into three +32-bit pieces which are stored at the locations pointed to by `z0Ptr', +`z1Ptr', and `z2Ptr'. 
+------------------------------------------------------------------------------- +*/ +INLINE void + add96( + bits32 a0, + bits32 a1, + bits32 a2, + bits32 b0, + bits32 b1, + bits32 b2, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr + ) +{ + bits32 z0, z1, z2; + int8 carry0, carry1; + + z2 = a2 + b2; + carry1 = ( z2 < a2 ); + z1 = a1 + b1; + carry0 = ( z1 < a1 ); + z0 = a0 + b0; + z1 += carry1; + z0 += ( z1 < (bits32)carry1 ); + z0 += carry0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the +64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo +2^64, so any borrow out (carry out) is lost. The result is broken into two +32-bit pieces which are stored at the locations pointed to by `z0Ptr' and +`z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + sub64( + bits32 a0, bits32 a1, bits32 b0, bits32 b1, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + + *z1Ptr = a1 - b1; + *z0Ptr = a0 - b0 - ( a1 < b1 ); + +} + +/* +------------------------------------------------------------------------------- +Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from +the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction +is modulo 2^96, so any borrow out (carry out) is lost. The result is broken +into three 32-bit pieces which are stored at the locations pointed to by +`z0Ptr', `z1Ptr', and `z2Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + sub96( + bits32 a0, + bits32 a1, + bits32 a2, + bits32 b0, + bits32 b1, + bits32 b2, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr + ) +{ + bits32 z0, z1, z2; + int8 borrow0, borrow1; + + z2 = a2 - b2; + borrow1 = ( a2 < b2 ); + z1 = a1 - b1; + borrow0 = ( a1 < b1 ); + z0 = a0 - b0; + z0 -= ( z1 < (bits32)borrow1 ); + z1 -= borrow1; + z0 -= borrow0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies `a' by `b' to obtain a 64-bit product. The product is broken +into two 32-bit pieces which are stored at the locations pointed to by +`z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void mul32To64( bits32 a, bits32 b, bits32 *z0Ptr, bits32 *z1Ptr ) +{ + bits16 aHigh, aLow, bHigh, bLow; + bits32 z0, zMiddleA, zMiddleB, z1; + + aLow = a; + aHigh = a>>16; + bLow = b; + bHigh = b>>16; + z1 = ( (bits32) aLow ) * bLow; + zMiddleA = ( (bits32) aLow ) * bHigh; + zMiddleB = ( (bits32) aHigh ) * bLow; + z0 = ( (bits32) aHigh ) * bHigh; + zMiddleA += zMiddleB; + z0 += ( ( (bits32) ( zMiddleA < zMiddleB ) )<<16 ) + ( zMiddleA>>16 ); + zMiddleA <<= 16; + z1 += zMiddleA; + z0 += ( z1 < zMiddleA ); + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies the 64-bit value formed by concatenating `a0' and `a1' by `b' +to obtain a 96-bit product. The product is broken into three 32-bit pieces +which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and +`z2Ptr'. 
+------------------------------------------------------------------------------- +*/ +INLINE void + mul64By32To96( + bits32 a0, + bits32 a1, + bits32 b, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr + ) +{ + bits32 z0, z1, z2, more1; + + mul32To64( a1, b, &z1, &z2 ); + mul32To64( a0, b, &z0, &more1 ); + add64( z0, more1, 0, z1, &z0, &z1 ); + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the +64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit +product. The product is broken into four 32-bit pieces which are stored at +the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + mul64To128( + bits32 a0, + bits32 a1, + bits32 b0, + bits32 b1, + bits32 *z0Ptr, + bits32 *z1Ptr, + bits32 *z2Ptr, + bits32 *z3Ptr + ) +{ + bits32 z0, z1, z2, z3; + bits32 more1, more2; + + mul32To64( a1, b1, &z2, &z3 ); + mul32To64( a1, b0, &z1, &more2 ); + add64( z1, more2, 0, z2, &z1, &z2 ); + mul32To64( a0, b0, &z0, &more1 ); + add64( z0, more1, 0, z1, &z0, &z1 ); + mul32To64( a0, b1, &more1, &more2 ); + add64( more1, more2, 0, z2, &more1, &z2 ); + add64( z0, z1, 0, more1, &z0, &z1 ); + *z3Ptr = z3; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Returns an approximation to the 32-bit integer quotient obtained by dividing +`b' into the 64-bit value formed by concatenating `a0' and `a1'. The +divisor `b' must be at least 2^31. If q is the exact quotient truncated +toward zero, the approximation returned lies between q and q + 2 inclusive. +If the exact quotient q is larger than 32 bits, the maximum positive 32-bit +unsigned integer is returned. +------------------------------------------------------------------------------- +*/ +static bits32 estimateDiv64To32( bits32 a0, bits32 a1, bits32 b ) +{ + bits32 b0, b1; + bits32 rem0, rem1, term0, term1; + bits32 z; + + if ( b <= a0 ) return 0xFFFFFFFF; + b0 = b>>16; + z = ( b0<<16 <= a0 ) ? 0xFFFF0000 : ( a0 / b0 )<<16; + mul32To64( b, z, &term0, &term1 ); + sub64( a0, a1, term0, term1, &rem0, &rem1 ); + while ( ( (sbits32) rem0 ) < 0 ) { + z -= 0x10000; + b1 = b<<16; + add64( rem0, rem1, b0, b1, &rem0, &rem1 ); + } + rem0 = ( rem0<<16 ) | ( rem1>>16 ); + z |= ( b0<<16 <= rem0 ) ? 0xFFFF : rem0 / b0; + return z; + +} + +#ifndef SOFTFLOAT_FOR_GCC +/* +------------------------------------------------------------------------------- +Returns an approximation to the square root of the 32-bit significand given +by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of +`aExp' (the least significant bit) is 1, the integer returned approximates +2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' +is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either +case, the approximation returned lies strictly within +/-2 of the exact +value. 
+------------------------------------------------------------------------------- +*/ +static bits32 estimateSqrt32( int16 aExp, bits32 a ) +{ + static const bits16 sqrtOddAdjustments[] = { + 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, + 0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67 + }; + static const bits16 sqrtEvenAdjustments[] = { + 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, + 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 + }; + int8 index; + bits32 z; + + index = ( a>>27 ) & 15; + if ( aExp & 1 ) { + z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ]; + z = ( ( a / z )<<14 ) + ( z<<15 ); + a >>= 1; + } + else { + z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ]; + z = a / z + z; + z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 ); + if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 ); + } + return ( ( estimateDiv64To32( a, 0, z ) )>>1 ) + ( z>>1 ); + +} +#endif + +/* +------------------------------------------------------------------------------- +Returns the number of leading 0 bits before the most-significant 1 bit of +`a'. If `a' is zero, 32 is returned. +------------------------------------------------------------------------------- +*/ +static int8 countLeadingZeros32( bits32 a ) +{ + static const int8 countLeadingZerosHigh[] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int8 shiftCount; + + shiftCount = 0; + if ( a < 0x10000 ) { + shiftCount += 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + shiftCount += 8; + a <<= 8; + } + shiftCount += countLeadingZerosHigh[ a>>24 ]; + return shiftCount; + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 64-bit value formed by concatenating `a0' and `a1' is +equal to the 64-bit value formed by concatenating `b0' and `b1'. Otherwise, +returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag eq64( bits32 a0, bits32 a1, bits32 b0, bits32 b1 ) +{ + + return ( a0 == b0 ) && ( a1 == b1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 64-bit value formed by concatenating `a0' and `a1' is less +than or equal to the 64-bit value formed by concatenating `b0' and `b1'. +Otherwise, returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag le64( bits32 a0, bits32 a1, bits32 b0, bits32 b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 64-bit value formed by concatenating `a0' and `a1' is less +than the 64-bit value formed by concatenating `b0' and `b1'. Otherwise, +returns 0. 
+------------------------------------------------------------------------------- +*/ +INLINE flag lt64( bits32 a0, bits32 a1, bits32 b0, bits32 b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 64-bit value formed by concatenating `a0' and `a1' is not +equal to the 64-bit value formed by concatenating `b0' and `b1'. Otherwise, +returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag ne64( bits32 a0, bits32 a1, bits32 b0, bits32 b1 ) +{ + + return ( a0 != b0 ) || ( a1 != b1 ); + +} + Added: vendor/NetBSD/softfloat/dist/bits32/softfloat.c ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ vendor/NetBSD/softfloat/dist/bits32/softfloat.c Fri Jan 20 06:16:37 2012 (r230364) @@ -0,0 +1,2349 @@ +/* $NetBSD: softfloat.c,v 1.1 2002/05/21 23:51:07 bjh21 Exp $ */ + +/* + * This version hacked for use with gcc -msoft-float by bjh21. + * (Mostly a case of #ifdefing out things GCC doesn't need or provides + * itself). + */ + +/* + * Things you may want to define: + * + * SOFTFLOAT_FOR_GCC - build only those functions necessary for GCC (with + * -msoft-float) to work. Include "softfloat-for-gcc.h" to get them + * properly renamed. + */ + +/* + * This differs from the standard bits32/softfloat.c in that float64 + * is defined to be a 64-bit integer rather than a structure. The + * structure is float64s, with translation between the two going via + * float64u. + */ + +/* +=============================================================================== + +This C source file is part of the SoftFloat IEC/IEEE Floating-Point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. 
+ +=============================================================================== +*/ + +#include <sys/cdefs.h> +#if defined(LIBC_SCCS) && !defined(lint) +__RCSID("$NetBSD: softfloat.c,v 1.1 2002/05/21 23:51:07 bjh21 Exp $"); +#endif /* LIBC_SCCS and not lint */ + +#ifdef SOFTFLOAT_FOR_GCC +#include "softfloat-for-gcc.h" +#endif + +#include "milieu.h" +#include "softfloat.h" + +/* + * Conversions between floats as stored in memory and floats as + * SoftFloat uses them + */ +#ifndef FLOAT64_DEMANGLE +#define FLOAT64_DEMANGLE(a) (a) +#endif +#ifndef FLOAT64_MANGLE +#define FLOAT64_MANGLE(a) (a) +#endif + +/* +------------------------------------------------------------------------------- +Floating-point rounding mode and exception flags. +------------------------------------------------------------------------------- +*/ +fp_rnd float_rounding_mode = float_round_nearest_even; +fp_except float_exception_flags = 0; + +/* +------------------------------------------------------------------------------- +Primitive arithmetic functions, including multi-word arithmetic, and +division and square root approximations. (Can be specialized to target if +desired.) +------------------------------------------------------------------------------- +*/ +#include "softfloat-macros" + +/* +------------------------------------------------------------------------------- +Functions and definitions to determine: (1) whether tininess for underflow +is detected before or after rounding by default, (2) what (if anything) +happens when exceptions are raised, (3) how signaling NaNs are distinguished +from quiet NaNs, (4) the default generated quiet NaNs, and (4) how NaNs +are propagated from function inputs to output. These details are target- +specific. +------------------------------------------------------------------------------- +*/ +#include "softfloat-specialize" + +/* +------------------------------------------------------------------------------- +Returns the fraction bits of the single-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE bits32 extractFloat32Frac( float32 a ) +{ + + return a & 0x007FFFFF; + +} + +/* +------------------------------------------------------------------------------- +Returns the exponent bits of the single-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE int16 extractFloat32Exp( float32 a ) +{ + + return ( a>>23 ) & 0xFF; + +} + +/* +------------------------------------------------------------------------------- +Returns the sign bit of the single-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE flag extractFloat32Sign( float32 a ) +{ + + return a>>31; + +} + +/* +------------------------------------------------------------------------------- +Normalizes the subnormal single-precision floating-point value represented +by the denormalized significand `aSig'. The normalized exponent and +significand are stored at the locations pointed to by `zExpPtr' and +`zSigPtr', respectively. 
+------------------------------------------------------------------------------- +*/ +static void + normalizeFloat32Subnormal( bits32 aSig, int16 *zExpPtr, bits32 *zSigPtr ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32( aSig ) - 8; + *zSigPtr = aSig<<shiftCount; + *zExpPtr = 1 - shiftCount; + +} + +/* +------------------------------------------------------------------------------- +Packs the sign `zSign', exponent `zExp', and significand `zSig' into a +single-precision floating-point value, returning the result. After being +shifted into the proper positions, the three fields are simply added +together to form the result. This means that any integer portion of `zSig' +will be added into the exponent. Since a properly normalized significand +will have an integer portion equal to 1, the `zExp' input should be 1 less +than the desired result exponent whenever `zSig' is a complete, normalized +significand. +------------------------------------------------------------------------------- +*/ +INLINE float32 packFloat32( flag zSign, int16 zExp, bits32 zSig ) +{ + + return ( ( (bits32) zSign )<<31 ) + ( ( (bits32) zExp )<<23 ) + zSig; + +} + +/* +------------------------------------------------------------------------------- +Takes an abstract floating-point value having sign `zSign', exponent `zExp', +and significand `zSig', and returns the proper single-precision floating- +point value corresponding to the abstract input. Ordinarily, the abstract +value is simply rounded and packed into the single-precision format, with +the inexact exception raised if the abstract input cannot be represented +exactly. However, if the abstract value is too large, the overflow and +inexact exceptions are raised and an infinity or maximal finite value is +returned. If the abstract value is too small, the input value is rounded to +a subnormal number, and the underflow and inexact exceptions are raised if +the abstract input cannot be represented exactly as a subnormal single- +precision floating-point number. + The input significand `zSig' has its binary point between bits 30 +and 29, which is 7 bits to the left of the usual location. This shifted +significand must be normalized or smaller. If `zSig' is not normalized, +`zExp' must be 0; in that case, the result returned is a subnormal number, +and it must not require rounding. In the usual case that `zSig' is +normalized, `zExp' must be 1 less than the ``true'' floating-point exponent. +The handling of underflow and overflow follows the IEC/IEEE Standard for +Binary Floating-Point Arithmetic. +------------------------------------------------------------------------------- +*/ +static float32 roundAndPackFloat32( flag zSign, int16 zExp, bits32 zSig ) +{ + int8 roundingMode; + flag roundNearestEven; + int8 roundIncrement, roundBits; + flag isTiny; + + roundingMode = float_rounding_mode; + roundNearestEven = roundingMode == float_round_nearest_even; + roundIncrement = 0x40; + if ( ! 
roundNearestEven ) { + if ( roundingMode == float_round_to_zero ) { + roundIncrement = 0; + } + else { + roundIncrement = 0x7F; + if ( zSign ) { + if ( roundingMode == float_round_up ) roundIncrement = 0; + } + else { + if ( roundingMode == float_round_down ) roundIncrement = 0; + } + } + } + roundBits = zSig & 0x7F; + if ( 0xFD <= (bits16) zExp ) { + if ( ( 0xFD < zExp ) + || ( ( zExp == 0xFD ) + && ( (sbits32) ( zSig + roundIncrement ) < 0 ) ) + ) { + float_raise( float_flag_overflow | float_flag_inexact ); + return packFloat32( zSign, 0xFF, 0 ) - ( roundIncrement == 0 ); + } + if ( zExp < 0 ) { + isTiny = + ( float_detect_tininess == float_tininess_before_rounding ) + || ( zExp < -1 ) + || ( zSig + roundIncrement < 0x80000000 ); + shift32RightJamming( zSig, - zExp, &zSig ); + zExp = 0; + roundBits = zSig & 0x7F; + if ( isTiny && roundBits ) float_raise( float_flag_underflow ); + } + } + if ( roundBits ) float_exception_flags |= float_flag_inexact; + zSig = ( zSig + roundIncrement )>>7; + zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); + if ( zSig == 0 ) zExp = 0; + return packFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Takes an abstract floating-point value having sign `zSign', exponent `zExp', +and significand `zSig', and returns the proper single-precision floating- +point value corresponding to the abstract input. This routine is just like +`roundAndPackFloat32' except that `zSig' does not have to be normalized. +Bit 31 of `zSig' must be zero, and `zExp' must be 1 less than the ``true'' +floating-point exponent. *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
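
The recurring ``jamming'' shifts in the diff above are the primitive that makes correct IEC/IEEE rounding possible: any nonzero bits shifted off are OR-ed into the least significant bit of the result, so roundAndPackFloat32 can still tell that the discarded fraction was inexact. Below is a minimal standalone sketch of that behavior, using plain C99 uint32_t instead of the package's bits32 typedef, with a hypothetical main() added purely for illustration; it mirrors shift32RightJamming from bits32/softfloat-macros rather than being part of the import itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same semantics as shift32RightJamming in bits32/softfloat-macros:
 * shift `a' right by `count', OR-ing any nonzero bits shifted off
 * into bit 0 of the result (the "sticky" bit).
 */
static uint32_t
shift_right_jam32(uint32_t a, int count)
{

	if (count == 0)
		return (a);
	if (count < 32)
		return ((a >> count) | ((a << ((-count) & 31)) != 0));
	return (a != 0);
}

int
main(void)
{

	/* 0x80000003 >> 4 discards the nonzero low bits, so bit 0 is jammed. */
	printf("%#" PRIx32 "\n", shift_right_jam32(0x80000003, 4));	/* 0x8000001 */
	/* 0x80000000 >> 4 discards only zero bits, so nothing is jammed. */
	printf("%#" PRIx32 "\n", shift_right_jam32(0x80000000, 4));	/* 0x8000000 */
	return (0);
}

The 64-bit and extra-jamming variants in the same file apply the identical trick; they only differ in how many 32-bit words carry the sticky information.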