#ifdef BT_USE_DOUBLE_PRECISION
#define btVector3Data btVector3DoubleData
#define btVector3DataName "btVector3DoubleData"
#else
#define btVector3Data btVector3FloatData
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION
#if defined BT_USE_SSE

#pragma warning(disable : 4556)  // value of intrinsic immediate argument '4294967239' is out of range '0 - 255'
#define BT_SHUFFLE(x, y, z, w) ((w) << 6 | (z) << 4 | (y) << 2 | (x))

#define bt_pshufd_ps(_a, _mask) _mm_shuffle_ps((_a), (_a), (_mask))
#define bt_splat3_ps(_a, _i) bt_pshufd_ps((_a), BT_SHUFFLE(_i, _i, _i, 3))
#define bt_splat_ps(_a, _i) bt_pshufd_ps((_a), BT_SHUFFLE(_i, _i, _i, _i))
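// BT_SHUFFLE packs four 2-bit source-lane selectors into the 8-bit immediate
// expected by _mm_shuffle_ps: lane k of the result takes the source lane named
// by the k-th argument. For example, BT_SHUFFLE(1, 2, 0, 3) evaluates to 0xC9
// and rotates (x y z w) into (y z x w), the permutation used repeatedly by
// cross() below. bt_pshufd_ps shuffles a register against itself, so
// bt_splat_ps(a, i) broadcasts lane i into all four lanes.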
#define btv3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvAbsMask (_mm_set_epi32(0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
#define btv3AbsfMask btCastiTo128f(btv3AbsiMask)
#define btvFFF0fMask btCastiTo128f(btvFFF0Mask)
#define btvxyzMaskf btvFFF0fMask
#define btvAbsfMask btCastiTo128f(btvAbsMask)
#define btvMzeroMask (_mm_set_ps(-0.0f, -0.0f, -0.0f, -0.0f))
#define v1110 (_mm_set_ps(0.0f, 1.0f, 1.0f, 1.0f))
#define vHalf (_mm_set_ps(0.5f, 0.5f, 0.5f, 0.5f))
#define v1_5 (_mm_set_ps(1.5f, 1.5f, 1.5f, 1.5f))
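// These constants feed the fast paths below: btvMzeroMask holds only the sign
// bit of each lane, so xor-ing against it negates a vector; v1110 supplies a
// 1.0f in the low lane as the numerator for _mm_div_ss reciprocals; vHalf and
// v1_5 are the 0.5 and 1.5 coefficients of the Newton-Raphson refinement
// y1 = y0*(1.5 - 0.5*x*y0*y0) used by normalize().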
// NEON equivalents of the SSE mask constants above
const float32x4_t ATTRIBUTE_ALIGNED16(btvMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
const int32x4_t ATTRIBUTE_ALIGNED16(btvFFF0Mask) = (int32x4_t){static_cast<int32_t>(0xFFFFFFFF),
	static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0xFFFFFFFF), 0x0};
const int32x4_t ATTRIBUTE_ALIGNED16(btvAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
const int32x4_t ATTRIBUTE_ALIGNED16(btv3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};
#if defined(__SPU__) && defined(__CELLOS_LV2__)

#else  //__CELLOS_LV2__ __SPU__
#if defined(BT_USE_SSE) || defined(BT_USE_NEON)  // _WIN32 || ARM
	union {
		btSimdFloat4 mVec128;
		btScalar m_floats[4];
	};
#else
	btScalar m_floats[4];
#endif
#endif  //__CELLOS_LV2__ __SPU__
#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)) || defined(BT_USE_NEON)
	mVec128 = rhs.mVec128;
#endif  // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = _mm_add_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
	mVec128 = vaddq_f32(mVec128, v.mVec128);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = _mm_sub_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
	mVec128 = vsubq_f32(mVec128, v.mVec128);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 vs = _mm_load_ss(&s);  // (s 0 0 0)
	vs = bt_pshufd_ps(vs, 0x80);  // (s s s 0)
	mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
	mVec128 = vmulq_n_f32(mVec128, s);
#if 0  //defined(BT_USE_SSE_IN_API)
	__m128 vs = _mm_load_ss(&s);  // (s 0 0 0)
	vs = _mm_div_ss(v1110, vs);   // (1/s 0 0 0), v1110 supplies the 1.0f
	vs = bt_pshufd_ps(vs, 0x00);  // (1/s 1/s 1/s 1/s)

	mVec128 = _mm_mul_ps(mVec128, vs);
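// dot() multiplies component-wise and then horizontally sums lanes 0..2.
// On SSE, _mm_movehl_ps copies z into the low lane and the 0x55 shuffle
// broadcasts y, so two scalar adds leave x+y+z in lane 0 for _mm_cvtss_f32.
// On NEON, vpadd_f32 produces x+y and a final vadd_f32 folds in z from the
// high half.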
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 vd = _mm_mul_ps(mVec128, v.mVec128);
	__m128 z = _mm_movehl_ps(vd, vd);
	__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
	vd = _mm_add_ss(vd, y);
	vd = _mm_add_ss(vd, z);
	return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
	float32x4_t vd = vmulq_f32(mVec128, v.mVec128);
	float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_low_f32(vd));
	x = vadd_f32(x, vget_high_f32(vd));
	return vget_lane_f32(x, 0);
	return m_floats[0] * v.m_floats[0] +
		   m_floats[1] * v.m_floats[1] +
		   m_floats[2] * v.m_floats[2];
	int maxIndex = absVec.maxAxis();
	if (absVec[maxIndex] > 0)
		*this /= absVec[maxIndex];
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	// dot product first
	__m128 vd = _mm_mul_ps(mVec128, mVec128);
	__m128 z = _mm_movehl_ps(vd, vd);
	__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
	vd = _mm_add_ss(vd, y);
	vd = _mm_add_ss(vd, z);

#if 0
	vd = _mm_sqrt_ss(vd);
	vd = _mm_div_ss(v1110, vd);  // 1/length in lane 0
	vd = bt_splat_ps(vd, 0x80);  // splat it across x, y, z
	mVec128 = _mm_mul_ps(mVec128, vd);
#else
	// NR step 1/sqrt(x) - vd is x, y is output
	y = _mm_rsqrt_ss(vd);  // estimate

	// one Newton-Raphson step: y = y*(1.5 - 0.5*x*y*y)
	z = v1_5;
	vd = _mm_mul_ss(vd, vHalf);  // vd * 0.5
	vd = _mm_mul_ss(vd, y);      // vd * 0.5 * y0
	vd = _mm_mul_ss(vd, y);      // vd * 0.5 * y0 * y0
	z = _mm_sub_ss(z, vd);       // 1.5 - vd * 0.5 * y0 * y0

	y = _mm_mul_ss(y, z);  // y0 * (1.5 - vd * 0.5 * y0 * y0)

	y = bt_splat_ps(y, 0x80);  // splat 1/length across x, y, z
	mVec128 = _mm_mul_ps(mVec128, y);
#endif
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	return btVector3(_mm_and_ps(mVec128, btv3AbsfMask));
#elif defined(BT_USE_NEON)
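// cross() uses the classic shuffle formulation: with a' = a rotated to
// (y z x) and b' = b rotated likewise, a*b' - a'*b yields the cross product
// rotated by one lane, and a final (y z x) shuffle rotates it back into
// place. That costs two shuffles up front, two multiplies, one subtract, and
// a closing shuffle instead of six scalar multiplies and three subtracts.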
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 T, V;

	T = bt_pshufd_ps(mVec128, BT_SHUFFLE(1, 2, 0, 3));    // (y z x 0)
	V = bt_pshufd_ps(v.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (y z x 0)

	V = _mm_mul_ps(V, mVec128);
	T = _mm_mul_ps(T, v.mVec128);
	V = _mm_sub_ps(V, T);

	V = bt_pshufd_ps(V, BT_SHUFFLE(1, 2, 0, 3));
	return btVector3(V);
#elif defined(BT_USE_NEON)
	float32x4_t T, V;

	// form (y, z, x, _) of mVec128 and v.mVec128
	float32x2_t Tlow = vget_low_f32(mVec128);
	float32x2_t Vlow = vget_low_f32(v.mVec128);
	T = vcombine_f32(vext_f32(Tlow, vget_high_f32(mVec128), 1), Tlow);
	V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v.mVec128), 1), Vlow);

	V = vmulq_f32(V, mVec128);
	T = vmulq_f32(T, v.mVec128);
	V = vsubq_f32(V, T);
	Vlow = vget_low_f32(V);
	// form (y, z, x, _) of the result, then clear w
	V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);
	V = (float32x4_t)vandq_s32((int32x4_t)V, btvFFF0Mask);

	return btVector3(V);
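// triple() is the scalar triple product this . (v1 x v2): the cross is formed
// with the same rotate-multiply-subtract sequence as cross(), then reduced
// with the same horizontal add as dot().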
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	// cross:
	__m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (y z x 0)
	__m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (y z x 0)

	V = _mm_mul_ps(V, v1.mVec128);
	T = _mm_mul_ps(T, v2.mVec128);
	V = _mm_sub_ps(V, T);

	V = _mm_shuffle_ps(V, V, BT_SHUFFLE(1, 2, 0, 3));

	// dot:
	V = _mm_mul_ps(V, mVec128);
	__m128 z = _mm_movehl_ps(V, V);
	__m128 y = _mm_shuffle_ps(V, V, 0x55);
	V = _mm_add_ss(V, y);
	V = _mm_add_ss(V, z);
	return _mm_cvtss_f32(V);
#elif defined(BT_USE_NEON)
	// cross:
	float32x4_t T, V;

	// form (y, z, x, _) of v1 and v2
	float32x2_t Tlow = vget_low_f32(v1.mVec128);
	float32x2_t Vlow = vget_low_f32(v2.mVec128);
	T = vcombine_f32(vext_f32(Tlow, vget_high_f32(v1.mVec128), 1), Tlow);
	V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v2.mVec128), 1), Vlow);

	V = vmulq_f32(V, v1.mVec128);
	T = vmulq_f32(T, v2.mVec128);
	V = vsubq_f32(V, T);
	Vlow = vget_low_f32(V);
	// form (y, z, x, _) of the result
	V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);

	// dot:
	V = vmulq_f32(mVec128, V);
	float32x2_t x = vpadd_f32(vget_low_f32(V), vget_low_f32(V));
	x = vadd_f32(x, vget_high_f32(V));
	return vget_lane_f32(x, 0);
	return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);

	return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);

	return absolute().minAxis();

	return absolute().maxAxis();
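// setInterpolate3(v0, v1, rt) sets *this to v0 + (v1 - v0)*rt. The SSE path
// evaluates it as v0*s + v1*rt with s = 1 - rt, broadcasting each scalar into
// the xyz lanes with the 0x80 shuffle (w stays 0).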
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 vrt = _mm_load_ss(&rt);  // (rt 0 0 0)
	btScalar s = btScalar(1.0) - rt;
	__m128 vs = _mm_load_ss(&s);  // (s 0 0 0)
	vs = bt_pshufd_ps(vs, 0x80);  // (s s s 0)
	__m128 r0 = _mm_mul_ps(v0.mVec128, vs);
	vrt = bt_pshufd_ps(vrt, 0x80);  // (rt rt rt 0)
	__m128 r1 = _mm_mul_ps(v1.mVec128, vrt);
	__m128 tmp3 = _mm_add_ps(r0, r1);
	mVec128 = tmp3;
#elif defined(BT_USE_NEON)
	mVec128 = vsubq_f32(v1.mVec128, v0.mVec128);
	mVec128 = vmulq_n_f32(mVec128, rt);
	mVec128 = vaddq_f32(mVec128, v0.mVec128);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 vt = _mm_load_ss(&t);  // (t 0 0 0)
	vt = bt_pshufd_ps(vt, 0x80);  // (t t t 0)
	__m128 vl = _mm_sub_ps(v.mVec128, mVec128);
	vl = _mm_mul_ps(vl, vt);
	vl = _mm_add_ps(vl, mVec128);

	return btVector3(vl);
#elif defined(BT_USE_NEON)
	float32x4_t vl = vsubq_f32(v.mVec128, mVec128);
	vl = vmulq_n_f32(vl, t);
	vl = vaddq_f32(vl, mVec128);

	return btVector3(vl);
#else
	return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
					 m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
					 m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = _mm_mul_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
	mVec128 = vmulq_f32(mVec128, v.mVec128);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
#else
	return ((m_floats[3] == other.m_floats[3]) &&
			(m_floats[2] == other.m_floats[2]) &&
			(m_floats[1] == other.m_floats[1]) &&
			(m_floats[0] == other.m_floats[0]));
	return !(*this == other);
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = _mm_max_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
	mVec128 = vmaxq_f32(mVec128, other.mVec128);

#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = _mm_min_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
	mVec128 = vminq_f32(mVec128, other.mVec128);
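// getSkewSymmetricMatrix() fills v0, v1, v2 with the rows (0, -z, y),
// (z, 0, -x), (-y, x, 0) of the cross-product matrix of *this, i.e. the
// matrix S with S*u == this->cross(u). The SSE path below builds all three
// rows from the masked vector and its negation using three shuffles.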
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 V = _mm_and_ps(mVec128, btvFFF0fMask);  // (x y z 0)
	__m128 V0 = _mm_xor_ps(btvMzeroMask, V);       // (-x -y -z 0)
	__m128 V2 = _mm_movelh_ps(V0, V);              // (-x -y x y)

	__m128 V1 = _mm_shuffle_ps(V, V0, 0xCE);  // (z 0 -x 0)

	V0 = _mm_shuffle_ps(V0, V, 0xDB);  // (0 -z y 0)
	V2 = _mm_shuffle_ps(V2, V, 0xF9);  // (-y x 0 0)
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	mVec128 = (__m128)_mm_xor_ps(mVec128, mVec128);
#elif defined(BT_USE_NEON)
	int32x4_t vi = vdupq_n_s32(0);
	mVec128 = vreinterpretq_f32_s32(vi);
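// dot3(v0, v1, v2) returns btVector3(this->dot(v0), this->dot(v1),
// this->dot(v2)). The SSE path forms the three product vectors a0..a2 and
// transposes them with unpack/move shuffles, so the three sums accumulate
// with packed adds instead of three separate horizontal reductions.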
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 a0 = _mm_mul_ps(v0.mVec128, this->mVec128);
	__m128 a1 = _mm_mul_ps(v1.mVec128, this->mVec128);
	__m128 a2 = _mm_mul_ps(v2.mVec128, this->mVec128);
	__m128 b0 = _mm_unpacklo_ps(a0, a1);
	__m128 b1 = _mm_unpackhi_ps(a0, a1);
	__m128 b2 = _mm_unpacklo_ps(a2, _mm_setzero_ps());
	__m128 r = _mm_movelh_ps(b0, b2);
	r = _mm_add_ps(r, _mm_movehl_ps(b2, b0));
	a2 = _mm_and_ps(a2, btvxyzMaskf);
	r = _mm_add_ps(r, btCastdTo128f(_mm_move_sd(btCastfTo128d(a2), btCastfTo128d(b1))));
	return btVector3(r);
#elif defined(BT_USE_NEON)
	static const uint32x4_t xyzMask = (const uint32x4_t){static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), 0};
	float32x4_t a0 = vmulq_f32(v0.mVec128, this->mVec128);
	float32x4_t a1 = vmulq_f32(v1.mVec128, this->mVec128);
	float32x4_t a2 = vmulq_f32(v2.mVec128, this->mVec128);
	float32x2x2_t zLo = vtrn_f32(vget_high_f32(a0), vget_high_f32(a1));
	a2 = (float32x4_t)vandq_u32((uint32x4_t)a2, xyzMask);
	float32x2_t b0 = vadd_f32(vpadd_f32(vget_low_f32(a0), vget_low_f32(a1)), zLo.val[0]);
	float32x2_t b1 = vpadd_f32(vpadd_f32(vget_low_f32(a2), vget_high_f32(a2)), vdup_n_f32(0.0f));
	return btVector3(vcombine_f32(b0, b1));
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	return btVector3(_mm_add_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
	return btVector3(vaddq_f32(v1.mVec128, v2.mVec128));

#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	return btVector3(_mm_mul_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
	return btVector3(vmulq_f32(v1.mVec128, v2.mVec128));
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
	// mask the w lane so the subtraction cannot leave a non-zero w
	__m128 r = _mm_sub_ps(v1.mVec128, v2.mVec128);
	return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
	float32x4_t r = vsubq_f32(v1.mVec128, v2.mVec128);
	return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
	__m128 r = _mm_xor_ps(v.mVec128, btvMzeroMask);  // flip the sign bits
	return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
	return btVector3((btSimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)btvMzeroMask));
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 vs = _mm_load_ss(&s);  // (s 0 0 0)
	vs = bt_pshufd_ps(vs, 0x80);  // (s s s 0)
	return btVector3(_mm_mul_ps(v.mVec128, vs));
#elif defined(BT_USE_NEON)
	float32x4_t r = vmulq_n_f32(v.mVec128, s);
	return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
#if 0  //defined(BT_USE_SSE_IN_API)
	__m128 vs = _mm_load_ss(&s);  // (s 0 0 0)
	vs = _mm_div_ss(v1110, vs);   // (1/s 0 0 0)
	vs = bt_pshufd_ps(vs, 0x00);  // (1/s 1/s 1/s 1/s)

	return btVector3(_mm_mul_ps(v.mVec128, vs));
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
	__m128 vec = _mm_div_ps(v1.mVec128, v2.mVec128);
	vec = _mm_and_ps(vec, btvFFF0fMask);
	return btVector3(vec);
#elif defined(BT_USE_NEON)
	float32x4_t x, y, v, m;
	x = v1.mVec128;
	y = v2.mVec128;
	v = vrecpeq_f32(y);     // initial estimate of 1/y
	m = vrecpsq_f32(y, v);  // m = 2 - v*y
	v = vmulq_f32(v, m);    // refined 1/y
	m = vrecpsq_f32(y, v);  // second Newton-Raphson step
	v = vmulq_f32(v, x);
	v = vmulq_f32(v, m);    // x * (1/y)
	return v1.lerp(v2, t);

	return (v - *this).length2();

	return (v - *this).length();
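// rotate(wAxis, angle), for a unit-length wAxis, splits *this into a
// component o = w*(w . v) parallel to the axis and the perpendicular pair
// x = v - o, y = w x v, returning o + x*cos(angle) + y*sin(angle) (visible
// in the scalar fallback below). In the SSE path, the 0xC9/0xD2 shuffles
// rotate the product vector so two packed adds leave the dot product
// w . v in each of the xyz lanes.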
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 O = _mm_mul_ps(wAxis.mVec128, mVec128);
	btScalar ssin = btSin(_angle);
	__m128 C = wAxis.cross(mVec128).mVec128;
	O = _mm_and_ps(O, btvFFF0fMask);
	btScalar scos = btCos(_angle);

	__m128 vsin = _mm_load_ss(&ssin);  // (S 0 0 0)
	__m128 vcos = _mm_load_ss(&scos);  // (C 0 0 0)

	__m128 Y = bt_pshufd_ps(O, 0xC9);  // (y z x 0)
	__m128 Z = bt_pshufd_ps(O, 0xD2);  // (z x y 0)
	O = _mm_add_ps(O, Y);
	vsin = bt_pshufd_ps(vsin, 0x80);  // (S S S 0)
	O = _mm_add_ps(O, Z);             // O now holds w . v in x, y, z
	vcos = bt_pshufd_ps(vcos, 0x80);  // (C C C 0)

	O = O * wAxis.mVec128;   // parallel component o = w * (w . v)
	__m128 X = mVec128 - O;  // perpendicular component
	_y = wAxis.cross(*this);

	return (o + _x * btCos(_angle) + _y * btSin(_angle));
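// maxDot(array, array_count, dotOut) returns the index of the element of
// array with the largest dot product against *this and stores that value in
// dotOut. Short inputs take the scalar loop; past scalar_cutoff (10 for SSE,
// 4 for NEON) the work is handed to the _maxdot_large kernel. minDot() below
// mirrors this for the smallest dot product.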
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
#if defined _WIN32 || defined(BT_USE_SSE)
	const long scalar_cutoff = 10;
	long _maxdot_large(const float *array, const float *vec, unsigned long array_count, float *dotOut);
#elif defined BT_USE_NEON
	const long scalar_cutoff = 4;
	extern long (*_maxdot_large)(const float *array, const float *vec, unsigned long array_count, float *dotOut);
#endif
	if (array_count < scalar_cutoff)

	for (i = 0; i < array_count; i++)
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
	return _maxdot_large((float *)array, (float *)&m_floats[0], array_count, &dotOut);
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
#if defined BT_USE_SSE
	const long scalar_cutoff = 10;
	long _mindot_large(const float *array, const float *vec, unsigned long array_count, float *dotOut);
#elif defined BT_USE_NEON
	const long scalar_cutoff = 4;
	extern long (*_mindot_large)(const float *array, const float *vec, unsigned long array_count, float *dotOut);
#else
#error unhandled arch!
#endif

	if (array_count < scalar_cutoff)

	for (i = 0; i < array_count; i++)
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
	return _mindot_large((float *)array, (float *)&m_floats[0], array_count, &dotOut);
#endif  //BT_USE_SIMD_VECTOR3
#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)) || defined(BT_USE_NEON)
	mVec128 = rhs.mVec128;

	mVec128 = v.mVec128;
#endif  // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	return btVector4(_mm_and_ps(mVec128, btvAbsfMask));
#elif defined(BT_USE_NEON)
	if (m_floats[0] > maxVal) { maxIndex = 0; maxVal = m_floats[0]; }
	if (m_floats[1] > maxVal) { maxIndex = 1; maxVal = m_floats[1]; }
	if (m_floats[2] > maxVal) { maxIndex = 2; maxVal = m_floats[2]; }
	if (m_floats[3] > maxVal) { maxIndex = 3; maxVal = m_floats[3]; }

	if (m_floats[0] < minVal) { minIndex = 0; minVal = m_floats[0]; }
	if (m_floats[1] < minVal) { minIndex = 1; minVal = m_floats[1]; }
	if (m_floats[2] < minVal) { minIndex = 2; minVal = m_floats[2]; }
	if (m_floats[3] < minVal) { minIndex = 3; minVal = m_floats[3]; }
#ifdef BT_USE_DOUBLE_PRECISION
	unsigned char* dest = (unsigned char*)&destVal;
	unsigned char* src = (unsigned char*)&sourceVal;
#else
	unsigned char* dest = (unsigned char*)&destVal;
	unsigned char* src = (unsigned char*)&sourceVal;
#endif  //BT_USE_DOUBLE_PRECISION
	for (int i = 0; i < 4; i++)

	for (int i = 0; i < 4; i++)

	vector = swappedVec;
	btScalar a = n[1] * n[1] + n[2] * n[2];

	btScalar a = n[0] * n[0] + n[1] * n[1];
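// btPlaneSpace1(n, p, q) builds two vectors orthogonal to n. The selection
// logic between the two branches is not shown here, but each a above is the
// squared length of n projected onto the y-z or x-y plane, and the branch is
// chosen so the selected projection cannot vanish for a unit-length n; the
// subsequent 1/sqrt(a) normalization therefore never divides by (near) zero.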
	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = float(m_floats[i]);

	for (int i = 0; i < 4; i++)
		m_floats[i] = btScalar(dataIn.m_floats[i]);

	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = double(m_floats[i]);

	for (int i = 0; i < 4; i++)
		m_floats[i] = btScalar(dataIn.m_floats[i]);

	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = m_floats[i];

	for (int i = 0; i < 4; i++)
		m_floats[i] = dataIn.m_floats[i];
#endif  //BT_VECTOR3_H