From 74c6854fd8dcbaee736ac0421805ff1e03c4a1e2 Mon Sep 17 00:00:00 2001
From: iamn1ck
Date: Sun, 19 Feb 2017 07:00:34 -0600
Subject: -quadtree and opengl rendering are now in the master branch!
 -using sdl_rect for location and size ended up being not so great due to it not having floats, so we reverted back to using location
 -much, much refactoring is now needed
---
 inc/glm/detail/func_integer.inl | 393 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 393 insertions(+)
 create mode 100644 inc/glm/detail/func_integer.inl

diff --git a/inc/glm/detail/func_integer.inl b/inc/glm/detail/func_integer.inl
new file mode 100644
index 0000000..5702a15
--- /dev/null
+++ b/inc/glm/detail/func_integer.inl
@@ -0,0 +1,393 @@
+///////////////////////////////////////////////////////////////////////////////////
+/// OpenGL Mathematics (glm.g-truc.net)
+///
+/// Copyright (c) 2005 - 2015 G-Truc Creation (www.g-truc.net)
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to deal
+/// in the Software without restriction, including without limitation the rights
+/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+/// copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in
+/// all copies or substantial portions of the Software.
+///
+/// Restrictions:
+///		By making use of the Software for military purposes, you choose to make
+///		a Bunny unhappy.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+/// THE SOFTWARE.
+///
+/// @ref core
+/// @file glm/detail/func_integer.inl
+/// @date 2010-03-17 / 2011-06-15
+/// @author Christophe Riccio
+///////////////////////////////////////////////////////////////////////////////////
+
+#include "type_vec2.hpp"
+#include "type_vec3.hpp"
+#include "type_vec4.hpp"
+#include "type_int.hpp"
+#include "_vectorize.hpp"
+#if(GLM_ARCH != GLM_ARCH_PURE)
+#if(GLM_COMPILER & GLM_COMPILER_VC)
+#	include <intrin.h>
+#	pragma intrinsic(_BitScanReverse)
+#endif//(GLM_COMPILER & GLM_COMPILER_VC)
+#endif//(GLM_ARCH != GLM_ARCH_PURE)
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+	template <typename T>
+	GLM_FUNC_QUALIFIER T mask(T Bits)
+	{
+		return Bits >= sizeof(T) * 8 ? ~static_cast<T>(0) : (static_cast<T>(1) << Bits) - static_cast<T>(1);
+	}
+
+	template <bool EXEC = false>
+	struct compute_bitfieldReverseStep
+	{
+		template <typename T, precision P, template <typename, precision> class vecType>
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
+		{
+			return v;
+		}
+	};
+
+	template <>
+	struct compute_bitfieldReverseStep<true>
+	{
+		template <typename T, precision P, template <typename, precision> class vecType>
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
+		{
+			return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
+		}
+	};
+
+	template <bool EXEC = false>
+	struct compute_bitfieldBitCountStep
+	{
+		template <typename T, precision P, template <typename, precision> class vecType>
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
+		{
+			return v;
+		}
+	};
+
+	template <>
+	struct compute_bitfieldBitCountStep<true>
+	{
+		template <typename T, precision P, template <typename, precision> class vecType>
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
+		{
+			return (v & Mask) + ((v >> Shift) & Mask);
+		}
+	};
+
+	template <typename genIUType, size_t SIZE_OF_T>
+	struct compute_findLSB
+	{
+		GLM_FUNC_QUALIFIER static int call(genIUType Value)
+		{
+			if(Value == 0)
+				return -1;
+
+			return glm::bitCount(~Value & (Value - static_cast<genIUType>(1)));
+		}
+	};
+
+#	if GLM_HAS_BITSCAN_WINDOWS
+		template <typename genIUType>
+		struct compute_findLSB<genIUType, 4>
+		{
+			GLM_FUNC_QUALIFIER static int call(genIUType Value)
+			{
+				unsigned long Result(0);
+				unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast<unsigned long*>(&Value));
+				return IsNotNull ? int(Result) : -1;
+			}
+		};
+
+#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
+			template <typename genIUType>
+			struct compute_findLSB<genIUType, 8>
+			{
+				GLM_FUNC_QUALIFIER static int call(genIUType Value)
+				{
+					unsigned long Result(0);
+					unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
+					return IsNotNull ? int(Result) : -1;
+				}
+			};
+#		endif
+#	endif//GLM_HAS_BITSCAN_WINDOWS
+
+	template <typename T, precision P, template <typename, precision> class vecType, bool EXEC = true>
+	struct compute_findMSB_step_vec
+	{
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T Shift)
+		{
+			return x | (x >> Shift);
+		}
+	};
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	struct compute_findMSB_step_vec<T, P, vecType, false>
+	{
+		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T)
+		{
+			return x;
+		}
+	};
+
+	template <typename T, precision P, template <typename, precision> class vecType, int>
+	struct compute_findMSB_vec
+	{
+		GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & vec)
+		{
+			vecType<T, P> x(vec);
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 1));
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 2));
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 4));
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 16>::call(x, static_cast<T>( 8));
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(16));
+			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(32));
+			return vecType<int, P>(sizeof(T) * 8 - 1) - glm::bitCount(~x);
+		}
+	};
+
+#	if GLM_HAS_BITSCAN_WINDOWS
+		template <typename genIUType>
+		GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value)
+		{
+			unsigned long Result(0);
+			unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast<unsigned long*>(&Value));
+			return IsNotNull ? int(Result) : -1;
+		}
+
+		template <typename T, precision P, template <typename, precision> class vecType>
+		struct compute_findMSB_vec<T, P, vecType, 32>
+		{
+			GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
+			{
+				return detail::functor1<int, T, P, vecType>::call(compute_findMSB_32, x);
+			}
+		};
+
+#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
+			template <typename genIUType>
+			GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value)
+			{
+				unsigned long Result(0);
+				unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
+				return IsNotNull ? int(Result) : -1;
+			}
+
+			template <typename T, precision P, template <typename, precision> class vecType>
+			struct compute_findMSB_vec<T, P, vecType, 64>
+			{
+				GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
+				{
+					return detail::functor1<int, T, P, vecType>::call(compute_findMSB_64, x);
+				}
+			};
+#		endif
+#	endif//GLM_HAS_BITSCAN_WINDOWS
+}//namespace detail
+
+	// uaddCarry
+	GLM_FUNC_QUALIFIER uint uaddCarry(uint const & x, uint const & y, uint & Carry)
+	{
+		uint64 const Value64(static_cast<uint64>(x) + static_cast<uint64>(y));
+		uint64 const Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
+		Carry = Value64 > Max32 ? 1 : 0;
+		return static_cast<uint>(Value64 % (Max32 + static_cast<uint64>(1)));
+	}
+
+	template <precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<uint, P> uaddCarry(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Carry)
+	{
+		vecType<uint64, P> Value64(vecType<uint64, P>(x) + vecType<uint64, P>(y));
+		vecType<uint64, P> Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
+		Carry = mix(vecType<uint, P>(0), vecType<uint, P>(1), greaterThan(Value64, Max32));
+		return vecType<uint, P>(Value64 % (Max32 + static_cast<uint64>(1)));
+	}
+
+	// usubBorrow
+	GLM_FUNC_QUALIFIER uint usubBorrow(uint const & x, uint const & y, uint & Borrow)
+	{
+		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
+
+		Borrow = x >= y ? static_cast<uint>(0) : static_cast<uint>(1);
+		if(y >= x)
+			return y - x;
+		else
+			return static_cast<uint>((static_cast<int64>(1) << static_cast<int64>(32)) + (static_cast<int64>(y) - static_cast<int64>(x)));
+	}
+
+	template <precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<uint, P> usubBorrow(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Borrow)
+	{
+		Borrow = mix(vecType<uint, P>(1), vecType<uint, P>(0), greaterThanEqual(x, y));
+		vecType<uint, P> const YgeX(y - x);
+		vecType<uint, P> const XgeY(vecType<uint, P>((static_cast<int64>(1) << static_cast<int64>(32)) + (vecType<int64, P>(y) - vecType<int64, P>(x))));
+		return mix(XgeY, YgeX, greaterThanEqual(y, x));
+	}
+
+	// umulExtended
+	GLM_FUNC_QUALIFIER void umulExtended(uint const & x, uint const & y, uint & msb, uint & lsb)
+	{
+		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
+
+		uint64 Value64 = static_cast<uint64>(x) * static_cast<uint64>(y);
+		uint32* PointerMSB = (reinterpret_cast<uint32*>(&Value64) + 1);
+		msb = *PointerMSB;
+		uint32* PointerLSB = (reinterpret_cast<uint32*>(&Value64) + 0);
+		lsb = *PointerLSB;
+	}
+
+	template <precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER void umulExtended(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & msb, vecType<uint, P> & lsb)
+	{
+		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
+
+		vecType<uint64, P> Value64(vecType<uint64, P>(x) * vecType<uint64, P>(y));
+		msb = vecType<uint, P>(Value64 >> static_cast<uint64>(32));
+		lsb = vecType<uint, P>(Value64);
+	}
+
+	// imulExtended
+	GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int & msb, int & lsb)
+	{
+		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");
+
+		int64 Value64 = static_cast<int64>(x) * static_cast<int64>(y);
+		int32* PointerMSB = (reinterpret_cast<int32*>(&Value64) + 1);
+		msb = *PointerMSB;
+		int32* PointerLSB = (reinterpret_cast<int32*>(&Value64));
+		lsb = *PointerLSB;
+	}
+
+	template <precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER void imulExtended(vecType<int, P> const & x, vecType<int, P> const & y, vecType<int, P> & msb, vecType<int, P> & lsb)
+	{
+		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");
+
+		vecType<int64, P> Value64(vecType<int64, P>(x) * vecType<int64, P>(y));
+		lsb = vecType<int, P>(Value64 & static_cast<int64>(0xFFFFFFFF));
+		msb = vecType<int, P>((Value64 >> static_cast<int64>(32)) & static_cast<int64>(0xFFFFFFFF));
+	}
+
+	// bitfieldExtract
+	template <typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits)
+	{
+		return bitfieldExtract(tvec1<genIUType>(Value), Offset, Bits).x;
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldExtract(vecType<T, P> const & Value, int Offset, int Bits)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");
+
+		return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
+	}
+
+	// bitfieldInsert
+	template <typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const & Base, genIUType const & Insert, int Offset, int Bits)
+	{
+		return bitfieldInsert(tvec1<genIUType>(Base), tvec1<genIUType>(Insert), Offset, Bits).x;
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldInsert(vecType<T, P> const & Base, vecType<T, P> const & Insert, int Offset, int Bits)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");
+
+		T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
+		return (Base & ~Mask) | (Insert & Mask);
+	}
+
+	// bitfieldReverse
+	template <typename genType>
+	GLM_FUNC_QUALIFIER genType bitfieldReverse(genType x)
+	{
+		return bitfieldReverse(glm::tvec1<genType, glm::defaultp>(x)).x;
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldReverse(vecType<T, P> const & v)
+	{
+		vecType<T, P> x(v);
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >=  2>::call(x, T(0x5555555555555555ull), static_cast<T>( 1));
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >=  4>::call(x, T(0x3333333333333333ull), static_cast<T>( 2));
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >=  8>::call(x, T(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >= 16>::call(x, T(0x00FF00FF00FF00FFull), static_cast<T>( 8));
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >= 32>::call(x, T(0x0000FFFF0000FFFFull), static_cast<T>(16));
+		x = detail::compute_bitfieldReverseStep<sizeof(T) * 8 >= 64>::call(x, T(0x00000000FFFFFFFFull), static_cast<T>(32));
+		return x;
+	}
+
+	// bitCount
+	template <typename genType>
+	GLM_FUNC_QUALIFIER int bitCount(genType x)
+	{
+		return bitCount(glm::tvec1<genType, glm::defaultp>(x)).x;
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<int, P> bitCount(vecType<T, P> const & v)
+	{
+		vecType<typename detail::make_unsigned<T>::type, P> x(*reinterpret_cast<vecType<typename detail::make_unsigned<T>::type, P> const *>(&v));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >=  2>::call(x, typename detail::make_unsigned<T>::type(0x5555555555555555ull), typename detail::make_unsigned<T>::type( 1));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >=  4>::call(x, typename detail::make_unsigned<T>::type(0x3333333333333333ull), typename detail::make_unsigned<T>::type( 2));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >=  8>::call(x, typename detail::make_unsigned<T>::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned<T>::type( 4));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >= 16>::call(x, typename detail::make_unsigned<T>::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned<T>::type( 8));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >= 32>::call(x, typename detail::make_unsigned<T>::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned<T>::type(16));
+		x = detail::compute_bitfieldBitCountStep<sizeof(T) * 8 >= 64>::call(x, typename detail::make_unsigned<T>::type(0x00000000FFFFFFFFull), typename detail::make_unsigned<T>::type(32));
+		return vecType<int, P>(x);
+	}
+
+	// findLSB
+	template <typename genIUType>
+	GLM_FUNC_QUALIFIER int findLSB(genIUType Value)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");
+
+		return detail::compute_findLSB<genIUType, sizeof(genIUType)>::call(Value);
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<int, P> findLSB(vecType<T, P> const & x)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findLSB' only accept integer values");
+
+		return detail::functor1<int, T, P, vecType>::call(findLSB, x);
+	}
+
+	// findMSB
+	template <typename genIUType>
+	GLM_FUNC_QUALIFIER int findMSB(genIUType x)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");
+
+		return findMSB(tvec1<genIUType>(x)).x;
+	}
+
+	template <typename T, precision P, template <typename, precision> class vecType>
+	GLM_FUNC_QUALIFIER vecType<int, P> findMSB(vecType<T, P> const & x)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findMSB' only accept integer values");
+
+		return detail::compute_findMSB_vec<T, P, vecType, sizeof(T) * 8>::call(x);
+	}
+}//namespace glm
-- 
cgit v1.2.3
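
For reference, here is a minimal usage sketch of the GLSL-style integer functions this file implements. It is not part of the patch; it assumes GLM is on the include path (these functions are exposed through the core header glm/glm.hpp) and simply exercises a few of the scalar and vector overloads defined above.

// Usage sketch only -- not part of the commit above; assumes GLM is installed.
#include <glm/glm.hpp>
#include <cstdio>

int main()
{
	glm::uint v = 0x00F0u;

	// Pull 4 bits starting at bit 4 out of v -> 0xF.
	glm::uint nibble = glm::bitfieldExtract(v, 4, 4);

	// With the mask-based bitfieldInsert above, bits [8, 10) of the second
	// argument are copied into the base, so this yields 0x3F0.
	glm::uint inserted = glm::bitfieldInsert(v, glm::uint(0x3u) << 8, 8, 2);

	// For v = 0x00F0: bitCount = 4, findLSB = 4, findMSB = 7.
	std::printf("extract=0x%X insert=0x%X bits=%d lsb=%d msb=%d\n",
		nibble, inserted, glm::bitCount(v), glm::findLSB(v), glm::findMSB(v));

	// The vector overloads follow the same pattern and return integer vectors.
	glm::ivec4 counts = glm::bitCount(glm::uvec4(1u, 3u, 7u, 255u)); // (1, 2, 3, 8)
	std::printf("counts=(%d, %d, %d, %d)\n", counts.x, counts.y, counts.z, counts.w);
	return 0;
}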