00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017 #ifndef BT_VECTOR3_H
00018 #define BT_VECTOR3_H
00019
00020
00021 #include "btScalar.h"
00022 #include "btMinMax.h"
00023 #include "btAlignedAllocator.h"
00024
// Map the generic serialization type/name onto the precision-specific struct.
#ifdef BT_USE_DOUBLE_PRECISION
#define btVector3Data btVector3DoubleData
#define btVector3DataName "btVector3DoubleData"
#else
#define btVector3Data btVector3FloatData
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION

#if defined BT_USE_SSE

#ifdef _MSC_VER
#pragma warning(disable: 4556) // value of intrinsic immediate argument '4294967239' is out of range '0 - 255'
#endif

// Build an SSE shuffle immediate from four per-lane source indices
// (x selects the source for lane 0, w for lane 3).
#define BT_SHUFFLE(x,y,z,w) ((w)<<6 | (z)<<4 | (y)<<2 | (x))
// Permute a register against itself (pshufd-style shuffle).
#define bt_pshufd_ps( _a, _mask ) _mm_shuffle_ps((_a), (_a), (_mask) )
// Broadcast lane _i into x/y/z; lane 3 keeps the source's w.
#define bt_splat3_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i, 3) )
// Broadcast lane _i into all four lanes.
#define bt_splat_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i,_i) )

// Bit masks (_mm_set_epi32 lists lanes from high (w) to low (x)):
// btv3AbsiMask clears the sign bits of x/y/z and zeroes w entirely;
// btvAbsMask clears the sign bits of all four lanes;
// btvFFF0Mask keeps x/y/z unchanged and zeroes w.
#define btv3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvAbsMask (_mm_set_epi32( 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
#define btv3AbsfMask btCastiTo128f(btv3AbsiMask)
#define btvFFF0fMask btCastiTo128f(btvFFF0Mask)
#define btvxyzMaskf btvFFF0fMask
#define btvAbsfMask btCastiTo128f(btvAbsMask)

// Header-defined constants (namespace-scope const has internal linkage, so
// each translation unit gets its own copy).
// btvMzeroMask holds only sign bits: xor-ing with it negates a vector.
const __m128 ATTRIBUTE_ALIGNED16(btvMzeroMask) = {-0.0f, -0.0f, -0.0f, -0.0f};
const __m128 ATTRIBUTE_ALIGNED16(v1110) = {1.0f, 1.0f, 1.0f, 0.0f};
const __m128 ATTRIBUTE_ALIGNED16(vHalf) = {0.5f, 0.5f, 0.5f, 0.5f};
const __m128 ATTRIBUTE_ALIGNED16(v1_5) = {1.5f, 1.5f, 1.5f, 1.5f};

#endif

#ifdef BT_USE_NEON

// NEON equivalents of the SSE masks above.
const float32x4_t ATTRIBUTE_ALIGNED16(btvMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
const int32x4_t ATTRIBUTE_ALIGNED16(btvFFF0Mask) = (int32x4_t){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
const int32x4_t ATTRIBUTE_ALIGNED16(btvAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
const int32x4_t ATTRIBUTE_ALIGNED16(btv3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};

#endif
00073
/**@brief btVector3 holds three btScalar components plus one padding element.
 * The class is 16-byte aligned so the SSE/NEON paths can treat it as a single
 * 128-bit register (mVec128). The 3-argument constructor and setValue() keep
 * the 4th component at 0; several SIMD paths rely on w == 0.
 */
ATTRIBUTE_ALIGNED16(class) btVector3
{
public:

	BT_DECLARE_ALIGNED_ALLOCATOR();

#if defined (__SPU__) && defined (__CELLOS_LV2__)
	btScalar m_floats[4];
public:
	SIMD_FORCE_INLINE const vec_float4& get128() const
	{
		return *((const vec_float4*)&m_floats[0]);
	}
public:
#else //__CELLOS_LV2__ __SPU__
#if defined (BT_USE_SSE) || defined(BT_USE_NEON) // _WIN32 || ARM
	// Same 16 bytes viewed either as one SIMD register or as four scalars.
	union {
		btSimdFloat4 mVec128;
		btScalar m_floats[4];
	};
	SIMD_FORCE_INLINE btSimdFloat4 get128() const
	{
		return mVec128;
	}
	SIMD_FORCE_INLINE void set128(btSimdFloat4 v128)
	{
		mVec128 = v128;
	}
#else
	btScalar m_floats[4];
#endif
#endif //__CELLOS_LV2__ __SPU__

public:

	/**@brief No-initialization constructor; the contents are undefined. */
	SIMD_FORCE_INLINE btVector3()
	{

	}

	/**@brief Constructor from scalars; the padding element w is set to 0.
	 * @param _x X value
	 * @param _y Y value
	 * @param _z Z value
	 */
	SIMD_FORCE_INLINE btVector3(const btScalar& _x, const btScalar& _y, const btScalar& _z)
	{
		m_floats[0] = _x;
		m_floats[1] = _y;
		m_floats[2] = _z;
		m_floats[3] = btScalar(0.f);
	}

#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE) )|| defined (BT_USE_NEON)
	/**@brief Construct directly from a 128-bit SIMD value (all four lanes copied). */
	SIMD_FORCE_INLINE btVector3( btSimdFloat4 v)
	{
		mVec128 = v;
	}

	/**@brief Copy constructor (single register copy). */
	SIMD_FORCE_INLINE btVector3(const btVector3& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	/**@brief Assignment operator (single register copy). */
	SIMD_FORCE_INLINE btVector3&
	operator=(const btVector3& v)
	{
		mVec128 = v.mVec128;

		return *this;
	}
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

	/**@brief Add a vector to this one, component-wise.
	 * @param v The vector to add to this one
	 * @note SIMD paths add all four lanes; the scalar path leaves w untouched. */
	SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_add_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vaddq_f32(mVec128, v.mVec128);
#else
		m_floats[0] += v.m_floats[0];
		m_floats[1] += v.m_floats[1];
		m_floats[2] += v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Subtract a vector from this one, component-wise.
	 * @param v The vector to subtract */
	SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_sub_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vsubq_f32(mVec128, v.mVec128);
#else
		m_floats[0] -= v.m_floats[0];
		m_floats[1] -= v.m_floats[1];
		m_floats[2] -= v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Scale the vector.
	 * @param s Scale factor
	 * @note The SSE shuffle 0x80 splats (s, s, s, 0), so w is forced to 0;
	 * the scalar path leaves w untouched. */
	SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vs = _mm_load_ss(&s);	// (s, 0, 0, 0)
		vs = bt_pshufd_ps(vs, 0x80);	// (s, s, s, 0)
		mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
		mVec128 = vmulq_n_f32(mVec128, s);
#else
		m_floats[0] *= s;
		m_floats[1] *= s;
		m_floats[2] *= s;
#endif
		return *this;
	}

	/**@brief Inversely scale the vector (implemented as multiply by 1/s).
	 * @param s Scale factor to divide by */
	SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
	{
		btFullAssert(s != btScalar(0.0));

#if 0 //defined(BT_USE_SSE_IN_API)
// this code is not faster !
		__m128 vs = _mm_load_ss(&s);
		vs = _mm_div_ss(v1110, vs);
		vs = bt_pshufd_ps(vs, 0x00);	//	(S S S S)

		mVec128 = _mm_mul_ps(mVec128, vs);

		return *this;
#else
		return *this *= btScalar(1.0) / s;
#endif
	}

	/**@brief Return the dot product of the x/y/z components (w is excluded).
	 * @param v The other vector in the dot product */
	SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vd = _mm_mul_ps(mVec128, v.mVec128);
		// Horizontal add of lanes 0, 1 and 2 into lane 0.
		__m128 z = _mm_movehl_ps(vd, vd);
		__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, y);
		vd = _mm_add_ss(vd, z);
		return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
		float32x4_t vd = vmulq_f32(mVec128, v.mVec128);
		float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_low_f32(vd));
		x = vadd_f32(x, vget_high_f32(vd));
		return vget_lane_f32(x, 0);
#else
		return m_floats[0] * v.m_floats[0] +
			m_floats[1] * v.m_floats[1] +
			m_floats[2] * v.m_floats[2];
#endif
	}

	/**@brief Return the squared length of the vector. */
	SIMD_FORCE_INLINE btScalar length2() const
	{
		return dot(*this);
	}

	/**@brief Return the length of the vector. */
	SIMD_FORCE_INLINE btScalar length() const
	{
		return btSqrt(length2());
	}

	/**@brief Return the squared distance between the ends of this and another vector.
	 * This is semantically treating the vectors as points. */
	SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const;

	/**@brief Return the distance between the ends of this and another vector.
	 * This is semantically treating the vectors as points. */
	SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;

	/**@brief Normalize this vector, guarding against overflow/underflow by first
	 * dividing by the largest absolute component. Degenerate (all-zero) vectors
	 * are replaced by the unit x axis (1, 0, 0). */
	SIMD_FORCE_INLINE btVector3& safeNormalize()
	{
		btVector3 absVec = this->absolute();
		int maxIndex = absVec.maxAxis();
		if (absVec[maxIndex]>0)
		{
			*this /= absVec[maxIndex];
			return *this /= length();
		}
		setValue(1,0,0);
		return *this;
	}

	/**@brief Normalize this vector in place.
	 * x^2 + y^2 + z^2 = 1 afterwards.
	 * @note The SSE path uses an rsqrt estimate refined by one Newton-Raphson
	 * step (y' = y * (1.5 - 0.5 * d * y * y)), so the result is not bit-identical
	 * to the scalar path. */
	SIMD_FORCE_INLINE btVector3& normalize()
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		// dot(this, this) into lane 0
		__m128 vd = _mm_mul_ps(mVec128, mVec128);
		__m128 z = _mm_movehl_ps(vd, vd);
		__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, y);
		vd = _mm_add_ss(vd, z);

#if 0
		// exact variant: sqrt then divide
		vd = _mm_sqrt_ss(vd);
		vd = _mm_div_ss(v1110, vd);
		vd = bt_splat_ps(vd, 0x80);
		mVec128 = _mm_mul_ps(mVec128, vd);
#else

		// fast variant: rsqrt estimate ...
		y = _mm_rsqrt_ss(vd);

		// ... refined by one Newton-Raphson iteration
		z = v1_5;
		vd = _mm_mul_ss(vd, vHalf);
		//vd * 0.5 * y0 * y0
		vd = _mm_mul_ss(vd, y);
		vd = _mm_mul_ss(vd, y);
		z = _mm_sub_ss(z, vd);
		// y0 * (1.5 - vd * 0.5 * y0 * y0)
		y = _mm_mul_ss(y, z);

		y = bt_splat_ps(y, 0x80);
		mVec128 = _mm_mul_ps(mVec128, y);

#endif

		return *this;
#else
		return *this /= length();
#endif
	}

	/**@brief Return a normalized version of this vector; *this is unchanged. */
	SIMD_FORCE_INLINE btVector3 normalized() const;

	/**@brief Return a rotated version of this vector.
	 * @param wAxis The axis to rotate about; assumed unit length — TODO confirm callers normalize
	 * @param angle The angle to rotate by */
	SIMD_FORCE_INLINE btVector3 rotate( const btVector3& wAxis, const btScalar angle ) const;

	/**@brief Return the angle between this and another vector.
	 * @param v The other vector */
	SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
	{
		btScalar s = btSqrt(length2() * v.length2());
		btFullAssert(s != btScalar(0.0));
		return btAcos(dot(v) / s);
	}

	/**@brief Return a vector with the absolute values of each element.
	 * @note The SSE path also zeroes w (btv3AbsfMask); NEON takes |w| as well. */
	SIMD_FORCE_INLINE btVector3 absolute() const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btVector3(_mm_and_ps(mVec128, btv3AbsfMask));
#elif defined(BT_USE_NEON)
		return btVector3(vabsq_f32(mVec128));
#else
		return btVector3(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]));
#endif
	}

	/**@brief Return the cross product between this and another vector.
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 T, V;

		T = bt_pshufd_ps(mVec128, BT_SHUFFLE(1, 2, 0, 3));	//	(Y Z X 0)
		V = bt_pshufd_ps(v.mVec128, BT_SHUFFLE(1, 2, 0, 3));	//	(Y Z X 0)

		V = _mm_mul_ps(V, mVec128);
		T = _mm_mul_ps(T, v.mVec128);
		V = _mm_sub_ps(V, T);

		// rotate the result back into (x y z) order
		V = bt_pshufd_ps(V, BT_SHUFFLE(1, 2, 0, 3));
		return btVector3(V);
#elif defined(BT_USE_NEON)
		float32x4_t T, V;
		// (Y Z X 0) permutations built from low/high halves
		float32x2_t Tlow = vget_low_f32(mVec128);
		float32x2_t Vlow = vget_low_f32(v.mVec128);
		T = vcombine_f32(vext_f32(Tlow, vget_high_f32(mVec128), 1), Tlow);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v.mVec128), 1), Vlow);

		V = vmulq_f32(V, mVec128);
		T = vmulq_f32(T, v.mVec128);
		V = vsubq_f32(V, T);
		Vlow = vget_low_f32(V);
		// rotate back into (x y z) order and clear the w lane
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);
		V = (float32x4_t)vandq_s32((int32x4_t)V, btvFFF0Mask);

		return btVector3(V);
#else
		return btVector3(
			m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1],
			m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
			m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
#endif
	}

	/**@brief Return the scalar triple product: this . (v1 x v2). */
	SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		// cross(v1, v2), same shuffle scheme as cross() above
		__m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, BT_SHUFFLE(1, 2, 0, 3));	//	(Y Z X 0)
		__m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, BT_SHUFFLE(1, 2, 0, 3));	//	(Y Z X 0)

		V = _mm_mul_ps(V, v1.mVec128);
		T = _mm_mul_ps(T, v2.mVec128);
		V = _mm_sub_ps(V, T);

		V = _mm_shuffle_ps(V, V, BT_SHUFFLE(1, 2, 0, 3));

		// dot(this, cross): horizontal add of lanes 0..2
		V = _mm_mul_ps(V, mVec128);
		__m128 z = _mm_movehl_ps(V, V);
		__m128 y = _mm_shuffle_ps(V, V, 0x55);
		V = _mm_add_ss(V, y);
		V = _mm_add_ss(V, z);
		return _mm_cvtss_f32(V);

#elif defined(BT_USE_NEON)
		// cross(v1, v2) via (Y Z X 0) permutations
		float32x4_t T, V;

		float32x2_t Tlow = vget_low_f32(v1.mVec128);
		float32x2_t Vlow = vget_low_f32(v2.mVec128);
		T = vcombine_f32(vext_f32(Tlow, vget_high_f32(v1.mVec128), 1), Tlow);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v2.mVec128), 1), Vlow);

		V = vmulq_f32(V, v1.mVec128);
		T = vmulq_f32(T, v2.mVec128);
		V = vsubq_f32(V, T);
		Vlow = vget_low_f32(V);

		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);

		// dot(this, cross)
		V = vmulq_f32(mVec128, V);
		float32x2_t x = vpadd_f32(vget_low_f32(V), vget_low_f32(V));
		x = vadd_f32(x, vget_high_f32(V));
		return vget_lane_f32(x, 0);
#else
		return
			m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) +
			m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) +
			m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
#endif
	}

	/**@brief Return the axis with the smallest value.
	 * @note 0 = x, 1 = y, 2 = z; ties resolved by the comparison order below. */
	SIMD_FORCE_INLINE int minAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);
	}

	/**@brief Return the axis with the largest value.
	 * @note 0 = x, 1 = y, 2 = z; ties resolved by the comparison order below. */
	SIMD_FORCE_INLINE int maxAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);
	}

	/**@brief Return the axis with the smallest absolute value. */
	SIMD_FORCE_INLINE int furthestAxis() const
	{
		return absolute().minAxis();
	}

	/**@brief Return the axis with the largest absolute value. */
	SIMD_FORCE_INLINE int closestAxis() const
	{
		return absolute().maxAxis();
	}

	/**@brief Set this vector to (1-rt)*v0 + rt*v1.
	 * @note The scalar path does not write the unused w component;
	 * the SIMD paths blend all four lanes. */
	SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vrt = _mm_load_ss(&rt);	//	(rt 0 0 0)
		btScalar s = btScalar(1.0) - rt;
		__m128 vs = _mm_load_ss(&s);	//	(S 0 0 0)
		vs = bt_pshufd_ps(vs, 0x80);	//	(S S S 0.0)
		__m128 r0 = _mm_mul_ps(v0.mVec128, vs);
		vrt = bt_pshufd_ps(vrt, 0x80);	//	(rt rt rt 0.0)
		__m128 r1 = _mm_mul_ps(v1.mVec128, vrt);
		__m128 tmp3 = _mm_add_ps(r0,r1);
		mVec128 = tmp3;
#elif defined(BT_USE_NEON)
		// equivalent form: v0 + rt * (v1 - v0)
		mVec128 = vsubq_f32(v1.mVec128, v0.mVec128);
		mVec128 = vmulq_n_f32(mVec128, rt);
		mVec128 = vaddq_f32(mVec128, v0.mVec128);
#else
		btScalar s = btScalar(1.0) - rt;
		m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0];
		m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1];
		m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2];
		// don't do the unused w component
#endif
	}

	/**@brief Return the linear interpolation between this and another vector.
	 * @param v The other vector
	 * @param t The ratio of this to v (t = 0 => return this, t = 1 => return other) */
	SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vt = _mm_load_ss(&t);	//	(t 0 0 0)
		vt = bt_pshufd_ps(vt, 0x80);	//	(t t t 0.0)
		__m128 vl = _mm_sub_ps(v.mVec128, mVec128);
		vl = _mm_mul_ps(vl, vt);
		vl = _mm_add_ps(vl, mVec128);

		return btVector3(vl);
#elif defined(BT_USE_NEON)
		float32x4_t vl = vsubq_f32(v.mVec128, mVec128);
		vl = vmulq_n_f32(vl, t);
		vl = vaddq_f32(vl, mVec128);

		return btVector3(vl);
#else
		return
			btVector3( m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
				m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
				m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
#endif
	}

	/**@brief Element-wise multiply this vector by the other.
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_mul_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vmulq_f32(mVec128, v.mVec128);
#else
		m_floats[0] *= v.m_floats[0];
		m_floats[1] *= v.m_floats[1];
		m_floats[2] *= v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; }
	/**@brief Set the x value */
	SIMD_FORCE_INLINE void setX(btScalar _x) { m_floats[0] = _x;};
	/**@brief Set the y value */
	SIMD_FORCE_INLINE void setY(btScalar _y) { m_floats[1] = _y;};
	/**@brief Set the z value */
	SIMD_FORCE_INLINE void setZ(btScalar _z) { m_floats[2] = _z;};
	/**@brief Set the w value */
	SIMD_FORCE_INLINE void setW(btScalar _w) { m_floats[3] = _w;};
	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; }
	/**@brief Return the w value */
	SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; }

	// Implicit conversion to a scalar array; also enables operator[] indexing
	// via pointer arithmetic.
	SIMD_FORCE_INLINE operator btScalar *() { return &m_floats[0]; }
	SIMD_FORCE_INLINE operator const btScalar *() const { return &m_floats[0]; }

	/**@brief Exact equality comparison of all four components (including w). */
	SIMD_FORCE_INLINE bool operator==(const btVector3& other) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
#else
		return ((m_floats[3]==other.m_floats[3]) &&
			(m_floats[2]==other.m_floats[2]) &&
			(m_floats[1]==other.m_floats[1]) &&
			(m_floats[0]==other.m_floats[0]));
#endif
	}

	SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const
	{
		return !(*this == other);
	}

	/**@brief Set each element to the max of the current values and the values of another btVector3
	 * (all four components, including w).
	 * @param other The other btVector3 to compare with */
	SIMD_FORCE_INLINE void setMax(const btVector3& other)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_max_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vmaxq_f32(mVec128, other.mVec128);
#else
		btSetMax(m_floats[0], other.m_floats[0]);
		btSetMax(m_floats[1], other.m_floats[1]);
		btSetMax(m_floats[2], other.m_floats[2]);
		btSetMax(m_floats[3], other.w());
#endif
	}

	/**@brief Set each element to the min of the current values and the values of another btVector3
	 * (all four components, including w).
	 * @param other The other btVector3 to compare with */
	SIMD_FORCE_INLINE void setMin(const btVector3& other)
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_min_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vminq_f32(mVec128, other.mVec128);
#else
		btSetMin(m_floats[0], other.m_floats[0]);
		btSetMin(m_floats[1], other.m_floats[1]);
		btSetMin(m_floats[2], other.m_floats[2]);
		btSetMin(m_floats[3], other.w());
#endif
	}

	/**@brief Set x, y and z; w is reset to 0. */
	SIMD_FORCE_INLINE void setValue(const btScalar& _x, const btScalar& _y, const btScalar& _z)
	{
		m_floats[0]=_x;
		m_floats[1]=_y;
		m_floats[2]=_z;
		m_floats[3] = btScalar(0.f);
	}

	/**@brief Write the rows of the skew-symmetric cross-product matrix of this
	 * vector, i.e. the matrix M such that M * u == this->cross(u):
	 * v0 = ( 0, -z,  y), v1 = ( z,  0, -x), v2 = (-y,  x,  0). */
	void getSkewSymmetricMatrix(btVector3* v0,btVector3* v1,btVector3* v2) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		// Build the same three rows with shuffles of (x y z 0) and its negation.
		__m128 V = _mm_and_ps(mVec128, btvFFF0fMask);
		__m128 V0 = _mm_xor_ps(btvMzeroMask, V);
		__m128 V2 = _mm_movelh_ps(V0, V);

		__m128 V1 = _mm_shuffle_ps(V, V0, 0xCE);

		V0 = _mm_shuffle_ps(V0, V, 0xDB);
		V2 = _mm_shuffle_ps(V2, V, 0xF9);

		v0->mVec128 = V0;
		v1->mVec128 = V1;
		v2->mVec128 = V2;
#else
		v0->setValue(0. ,-z() ,y());
		v1->setValue(z() ,0. ,-x());
		v2->setValue(-y() ,x() ,0.);
#endif
	}

	/**@brief Zero all four components. */
	void setZero()
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = (__m128)_mm_xor_ps(mVec128, mVec128);
#elif defined(BT_USE_NEON)
		int32x4_t vi = vdupq_n_s32(0);
		mVec128 = vreinterpretq_f32_s32(vi);
#else
		setValue(btScalar(0.),btScalar(0.),btScalar(0.));
#endif
	}

	/**@brief Exact zero test on x, y and z (w is ignored). */
	SIMD_FORCE_INLINE bool isZero() const
	{
		return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0);
	}

	/**@brief Approximate zero test: squared length below SIMD_EPSILON. */
	SIMD_FORCE_INLINE bool fuzzyZero() const
	{
		return length2() < SIMD_EPSILON;
	}

	// Serialization in the build's native precision (btVector3Data resolves to
	// the float or double struct, see the macros at the top of this header).
	SIMD_FORCE_INLINE void serialize(struct btVector3Data& dataOut) const;

	SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn);

	// Explicit single-precision serialization.
	SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData& dataOut) const;

	SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn);

	// Explicit double-precision serialization.
	SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData& dataOut) const;

	SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn);

	/**@brief Return the index of the array element with the maximum dot product
	 * against this vector; the maximum dot product is written to dotOut. */
	SIMD_FORCE_INLINE long maxDot( const btVector3 *array, long array_count, btScalar &dotOut ) const;

	/**@brief Return the index of the array element with the minimum dot product
	 * against this vector; the minimum dot product is written to dotOut. */
	SIMD_FORCE_INLINE long minDot( const btVector3 *array, long array_count, btScalar &dotOut ) const;

	/**@brief Return a vector holding (this.dot(v0), this.dot(v1), this.dot(v2)). */
	SIMD_FORCE_INLINE btVector3 dot3( const btVector3 &v0, const btVector3 &v1, const btVector3 &v2 ) const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		// Three element-wise products, then a transposed horizontal add.
		__m128 a0 = _mm_mul_ps( v0.mVec128, this->mVec128 );
		__m128 a1 = _mm_mul_ps( v1.mVec128, this->mVec128 );
		__m128 a2 = _mm_mul_ps( v2.mVec128, this->mVec128 );
		__m128 b0 = _mm_unpacklo_ps( a0, a1 );
		__m128 b1 = _mm_unpackhi_ps( a0, a1 );
		__m128 b2 = _mm_unpacklo_ps( a2, _mm_setzero_ps() );
		__m128 r = _mm_movelh_ps( b0, b2 );
		r = _mm_add_ps( r, _mm_movehl_ps( b2, b0 ));
		a2 = _mm_and_ps( a2, btvxyzMaskf);	// drop a2's w term
		r = _mm_add_ps( r, btCastdTo128f (_mm_move_sd( btCastfTo128d(a2), btCastfTo128d(b1) )));
		return btVector3(r);

#elif defined(BT_USE_NEON)
		static const uint32x4_t xyzMask = (const uint32x4_t){ -1, -1, -1, 0 };
		float32x4_t a0 = vmulq_f32( v0.mVec128, this->mVec128);
		float32x4_t a1 = vmulq_f32( v1.mVec128, this->mVec128);
		float32x4_t a2 = vmulq_f32( v2.mVec128, this->mVec128);
		float32x2x2_t zLo = vtrn_f32( vget_high_f32(a0), vget_high_f32(a1));
		a2 = (float32x4_t) vandq_u32((uint32x4_t) a2, xyzMask );	// drop a2's w term
		float32x2_t b0 = vadd_f32( vpadd_f32( vget_low_f32(a0), vget_low_f32(a1)), zLo.val[0] );
		float32x2_t b1 = vpadd_f32( vpadd_f32( vget_low_f32(a2), vget_high_f32(a2)), vdup_n_f32(0.0f));
		return btVector3( vcombine_f32(b0, b1) );
#else
		return btVector3( dot(v0), dot(v1), dot(v2));
#endif
	}
};
00734
/**@brief Return the component-wise sum of two vectors.
 * @note SIMD paths add all four lanes; the scalar path builds a new vector
 * from x/y/z (w is reset to 0 by the constructor). */
SIMD_FORCE_INLINE btVector3
operator+(const btVector3& v1, const btVector3& v2)
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	return btVector3(_mm_add_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
	return btVector3(vaddq_f32(v1.mVec128, v2.mVec128));
#else
	return btVector3(
		v1.m_floats[0] + v2.m_floats[0],
		v1.m_floats[1] + v2.m_floats[1],
		v1.m_floats[2] + v2.m_floats[2]);
#endif
}
00750
/**@brief Return the element-wise product of two vectors. */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& v1, const btVector3& v2)
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	return btVector3(_mm_mul_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
	return btVector3(vmulq_f32(v1.mVec128, v2.mVec128));
#else
	return btVector3(
		v1.m_floats[0] * v2.m_floats[0],
		v1.m_floats[1] * v2.m_floats[1],
		v1.m_floats[2] * v2.m_floats[2]);
#endif
}
00766
/**@brief Return the component-wise difference of two vectors.
 * @note The SIMD paths explicitly clear the w lane of the result (FFF0 mask),
 * keeping the padding component at 0. */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& v1, const btVector3& v2)
{
#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
	// subtract, then mask off the w lane
	__m128 r = _mm_sub_ps(v1.mVec128, v2.mVec128);
	return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
	float32x4_t r = vsubq_f32(v1.mVec128, v2.mVec128);
	return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
#else
	return btVector3(
		v1.m_floats[0] - v2.m_floats[0],
		v1.m_floats[1] - v2.m_floats[1],
		v1.m_floats[2] - v2.m_floats[2]);
#endif
}
00786
/**@brief Return the negative of the vector.
 * @note The SSE path clears the w lane after sign-flipping; the NEON path
 * flips the sign bit of all four lanes (w stays zero-valued as -0.0). */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& v)
{
#if (defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
	__m128 r = _mm_xor_ps(v.mVec128, btvMzeroMask);
	return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
	return btVector3((btSimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)btvMzeroMask));
#else
	return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]);
#endif
}
00800
/**@brief Return the vector scaled by s.
 * @note The SSE shuffle 0x80 splats (s, s, s, 0), zeroing the w lane;
 * the NEON path masks w to 0 after the multiply. */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& v, const btScalar& s)
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	__m128 vs = _mm_load_ss(&s);	//	(s 0 0 0)
	vs = bt_pshufd_ps(vs, 0x80);	//	(s s s 0)
	return btVector3(_mm_mul_ps(v.mVec128, vs));
#elif defined(BT_USE_NEON)
	float32x4_t r = vmulq_n_f32(v.mVec128, s);
	return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
#else
	return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
#endif
}
00816
/**@brief Return the vector scaled by s (commuted overload; forwards to v * s). */
SIMD_FORCE_INLINE btVector3
operator*(const btScalar& s, const btVector3& v)
{
	return v * s;
}
00823
/**@brief Return the vector inversely scaled by s (implemented as multiply by 1/s). */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v, const btScalar& s)
{
	btFullAssert(s != btScalar(0.0));
#if 0 //defined(BT_USE_SSE_IN_API)
// this code is not faster !
	__m128 vs = _mm_load_ss(&s);
	vs = _mm_div_ss(v1110, vs);
	vs = bt_pshufd_ps(vs, 0x00);	//	(S S S S)

	return btVector3(_mm_mul_ps(v.mVec128, vs));
#else
	return v * (btScalar(1.0) / s);
#endif
}
00840
/**@brief Return the element-wise quotient of two vectors.
 * @note The NEON path uses a reciprocal estimate (vrecpeq_f32) refined by two
 * Newton-Raphson steps (vrecpsq_f32), so it is approximate and its w lane is
 * not masked; the SSE path does an exact divide and zeroes w. */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v1, const btVector3& v2)
{
#if (defined(BT_USE_SSE_IN_API)&& defined (BT_USE_SSE))
	__m128 vec = _mm_div_ps(v1.mVec128, v2.mVec128);
	vec = _mm_and_ps(vec, btvFFF0fMask);
	return btVector3(vec);
#elif defined(BT_USE_NEON)
	float32x4_t x, y, v, m;

	x = v1.mVec128;
	y = v2.mVec128;

	v = vrecpeq_f32(y);		// ~1/y estimate
	m = vrecpsq_f32(y, v);	// first refinement factor
	v = vmulq_f32(v, m);
	m = vrecpsq_f32(y, v);	// second refinement factor
	v = vmulq_f32(v, x);
	v = vmulq_f32(v, m);

	return btVector3(v);
#else
	return btVector3(
		v1.m_floats[0] / v2.m_floats[0],
		v1.m_floats[1] / v2.m_floats[1],
		v1.m_floats[2] / v2.m_floats[2]);
#endif
}
00870
00872 SIMD_FORCE_INLINE btScalar
00873 btDot(const btVector3& v1, const btVector3& v2)
00874 {
00875 return v1.dot(v2);
00876 }
00877
00878
00880 SIMD_FORCE_INLINE btScalar
00881 btDistance2(const btVector3& v1, const btVector3& v2)
00882 {
00883 return v1.distance2(v2);
00884 }
00885
00886
00888 SIMD_FORCE_INLINE btScalar
00889 btDistance(const btVector3& v1, const btVector3& v2)
00890 {
00891 return v1.distance(v2);
00892 }
00893
00895 SIMD_FORCE_INLINE btScalar
00896 btAngle(const btVector3& v1, const btVector3& v2)
00897 {
00898 return v1.angle(v2);
00899 }
00900
00902 SIMD_FORCE_INLINE btVector3
00903 btCross(const btVector3& v1, const btVector3& v2)
00904 {
00905 return v1.cross(v2);
00906 }
00907
00908 SIMD_FORCE_INLINE btScalar
00909 btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
00910 {
00911 return v1.triple(v2, v3);
00912 }
00913
00918 SIMD_FORCE_INLINE btVector3
00919 lerp(const btVector3& v1, const btVector3& v2, const btScalar& t)
00920 {
00921 return v1.lerp(v2, t);
00922 }
00923
00924
00925
00926 SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
00927 {
00928 return (v - *this).length2();
00929 }
00930
00931 SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
00932 {
00933 return (v - *this).length();
00934 }
00935
/**@brief Return a unit-length copy of this vector; *this is unchanged. */
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	btVector3 norm = *this;
	// reuse the SSE rsqrt-based normalize() on the copy
	return norm.normalize();
#else
	return *this / length();
#endif
}
00946
/**@brief Return this vector rotated about wAxis by _angle.
 * @param wAxis The rotation axis; the projection math assumes it is unit
 * length — TODO confirm callers normalize.
 * @param _angle The rotation angle
 * Uses the decomposition r = o + cos(a)*(v - o) + sin(a)*(wAxis x v),
 * where o is the projection of v onto the axis. */
SIMD_FORCE_INLINE btVector3 btVector3::rotate( const btVector3& wAxis, const btScalar _angle ) const
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)

	__m128 O = _mm_mul_ps(wAxis.mVec128, mVec128);
	btScalar ssin = btSin( _angle );
	__m128 C = wAxis.cross( mVec128 ).mVec128;
	O = _mm_and_ps(O, btvFFF0fMask);
	btScalar scos = btCos( _angle );

	__m128 vsin = _mm_load_ss(&ssin);	//	(S 0 0 0)
	__m128 vcos = _mm_load_ss(&scos);	//	(S 0 0 0)

	__m128 Y = bt_pshufd_ps(O, 0xC9);	//	(Y Z X 0)
	__m128 Z = bt_pshufd_ps(O, 0xD2);	//	(Z X Y 0)
	O = _mm_add_ps(O, Y);
	vsin = bt_pshufd_ps(vsin, 0x80);	//	(S S S 0)
	O = _mm_add_ps(O, Z);				// lanes 0..2 each hold dot(wAxis, v)
	vcos = bt_pshufd_ps(vcos, 0x80);	//	(S S S 0)

	vsin = vsin * C;		// sin * (wAxis x v)
	O = O * wAxis.mVec128;	// o = wAxis * dot(wAxis, v)
	__m128 X = mVec128 - O;	// component of v orthogonal to the axis

	O = O + vsin;
	vcos = vcos * X;
	O = O + vcos;

	return btVector3(O);
#else
	btVector3 o = wAxis * wAxis.dot( *this );	// projection of v onto the axis
	btVector3 _x = *this - o;					// orthogonal component
	btVector3 _y;

	_y = wAxis.cross( *this );

	return ( o + _x * btCos( _angle ) + _y * btSin( _angle ) );
#endif
}
00988
00989 SIMD_FORCE_INLINE long btVector3::maxDot( const btVector3 *array, long array_count, btScalar &dotOut ) const
00990 {
00991 #if defined (BT_USE_SSE) || defined (BT_USE_NEON)
00992 #if defined _WIN32 || defined (BT_USE_SSE)
00993 const long scalar_cutoff = 10;
00994 long _maxdot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
00995 #elif defined BT_USE_NEON
00996 const long scalar_cutoff = 4;
00997 extern long (*_maxdot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
00998 #endif
00999 if( array_count < scalar_cutoff )
01000 #else
01001
01002 #endif//BT_USE_SSE || BT_USE_NEON
01003 {
01004 btScalar maxDot = -SIMD_INFINITY;
01005 int i = 0;
01006 int ptIndex = -1;
01007 for( i = 0; i < array_count; i++ )
01008 {
01009 btScalar dot = array[i].dot(*this);
01010
01011 if( dot > maxDot )
01012 {
01013 maxDot = dot;
01014 ptIndex = i;
01015 }
01016 }
01017
01018 dotOut = maxDot;
01019 return ptIndex;
01020 }
01021 #if defined (BT_USE_SSE) || defined (BT_USE_NEON)
01022 return _maxdot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
01023 #endif
01024 }
01025
01026 SIMD_FORCE_INLINE long btVector3::minDot( const btVector3 *array, long array_count, btScalar &dotOut ) const
01027 {
01028 #if defined (BT_USE_SSE) || defined (BT_USE_NEON)
01029 #if defined BT_USE_SSE
01030 const long scalar_cutoff = 10;
01031 long _mindot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
01032 #elif defined BT_USE_NEON
01033 const long scalar_cutoff = 4;
01034 extern long (*_mindot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
01035 #else
01036 #error unhandled arch!
01037 #endif
01038
01039 if( array_count < scalar_cutoff )
01040 #endif//BT_USE_SSE || BT_USE_NEON
01041 {
01042 btScalar minDot = SIMD_INFINITY;
01043 int i = 0;
01044 int ptIndex = -1;
01045
01046 for( i = 0; i < array_count; i++ )
01047 {
01048 btScalar dot = array[i].dot(*this);
01049
01050 if( dot < minDot )
01051 {
01052 minDot = dot;
01053 ptIndex = i;
01054 }
01055 }
01056
01057 dotOut = minDot;
01058
01059 return ptIndex;
01060 }
01061 #if defined (BT_USE_SSE) || defined (BT_USE_NEON)
01062 return _mindot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
01063 #endif
01064 }
01065
01066
/**@brief btVector4 extends btVector3 with a meaningful w component
 * (set by the 4-argument constructor and setValue below). */
class btVector4 : public btVector3
{
public:

	/**@brief No-initialization constructor; the contents are undefined. */
	SIMD_FORCE_INLINE btVector4() {}

	/**@brief Constructor from four scalars. */
	SIMD_FORCE_INLINE btVector4(const btScalar& _x, const btScalar& _y, const btScalar& _z,const btScalar& _w)
		: btVector3(_x,_y,_z)
	{
		m_floats[3] = _w;
	}

#if (defined (BT_USE_SSE_IN_API)&& defined (BT_USE_SSE)) || defined (BT_USE_NEON)
	/**@brief Construct directly from a 128-bit SIMD value. */
	SIMD_FORCE_INLINE btVector4(const btSimdFloat4 vec)
	{
		mVec128 = vec;
	}

	/**@brief Construct from a btVector3 (register copy, including its w lane). */
	SIMD_FORCE_INLINE btVector4(const btVector3& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	/**@brief Assignment operator (single register copy). */
	SIMD_FORCE_INLINE btVector4&
	operator=(const btVector4& v)
	{
		mVec128 = v.mVec128;
		return *this;
	}
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

	/**@brief Return the component-wise absolute value of all four components. */
	SIMD_FORCE_INLINE btVector4 absolute4() const
	{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btVector4(_mm_and_ps(mVec128, btvAbsfMask));
#elif defined(BT_USE_NEON)
		return btVector4(vabsq_f32(mVec128));
#else
		return btVector4(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]),
			btFabs(m_floats[3]));
#endif
	}

	/**@brief Return the w value. */
	btScalar getW() const { return m_floats[3];}

	/**@brief Return the index (0..3) of the component with the largest value.
	 * Earlier components win ties. */
	SIMD_FORCE_INLINE int maxAxis4() const
	{
		int maxIndex = -1;
		btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
		if (m_floats[0] > maxVal)
		{
			maxIndex = 0;
			maxVal = m_floats[0];
		}
		if (m_floats[1] > maxVal)
		{
			maxIndex = 1;
			maxVal = m_floats[1];
		}
		if (m_floats[2] > maxVal)
		{
			maxIndex = 2;
			maxVal = m_floats[2];
		}
		if (m_floats[3] > maxVal)
		{
			maxIndex = 3;
			maxVal = m_floats[3];
		}

		return maxIndex;
	}

	/**@brief Return the index (0..3) of the component with the smallest value.
	 * Earlier components win ties. */
	SIMD_FORCE_INLINE int minAxis4() const
	{
		int minIndex = -1;
		btScalar minVal = btScalar(BT_LARGE_FLOAT);
		if (m_floats[0] < minVal)
		{
			minIndex = 0;
			minVal = m_floats[0];
		}
		if (m_floats[1] < minVal)
		{
			minIndex = 1;
			minVal = m_floats[1];
		}
		if (m_floats[2] < minVal)
		{
			minIndex = 2;
			minVal = m_floats[2];
		}
		if (m_floats[3] < minVal)
		{
			minIndex = 3;
			minVal = m_floats[3];
		}

		return minIndex;
	}

	/**@brief Return the index of the component with the largest absolute value. */
	SIMD_FORCE_INLINE int closestAxis4() const
	{
		return absolute4().maxAxis4();
	}

	/**@brief Set all four components.
	 * @param _x X value
	 * @param _y Y value
	 * @param _z Z value
	 * @param _w W value */
	SIMD_FORCE_INLINE void setValue(const btScalar& _x, const btScalar& _y, const btScalar& _z,const btScalar& _w)
	{
		m_floats[0]=_x;
		m_floats[1]=_y;
		m_floats[2]=_z;
		m_floats[3]=_w;
	}

};
01214
01215
01217 SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
01218 {
01219 #ifdef BT_USE_DOUBLE_PRECISION
01220 unsigned char* dest = (unsigned char*) &destVal;
01221 unsigned char* src = (unsigned char*) &sourceVal;
01222 dest[0] = src[7];
01223 dest[1] = src[6];
01224 dest[2] = src[5];
01225 dest[3] = src[4];
01226 dest[4] = src[3];
01227 dest[5] = src[2];
01228 dest[6] = src[1];
01229 dest[7] = src[0];
01230 #else
01231 unsigned char* dest = (unsigned char*) &destVal;
01232 unsigned char* src = (unsigned char*) &sourceVal;
01233 dest[0] = src[3];
01234 dest[1] = src[2];
01235 dest[2] = src[1];
01236 dest[3] = src[0];
01237 #endif //BT_USE_DOUBLE_PRECISION
01238 }
01240 SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
01241 {
01242 for (int i=0;i<4;i++)
01243 {
01244 btSwapScalarEndian(sourceVec[i],destVec[i]);
01245 }
01246
01247 }
01248
01250 SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
01251 {
01252
01253 btVector3 swappedVec;
01254 for (int i=0;i<4;i++)
01255 {
01256 btSwapScalarEndian(vector[i],swappedVec[i]);
01257 }
01258 vector = swappedVec;
01259 }
01260
01261 template <class T>
01262 SIMD_FORCE_INLINE void btPlaneSpace1 (const T& n, T& p, T& q)
01263 {
01264 if (btFabs(n[2]) > SIMDSQRT12) {
01265
01266 btScalar a = n[1]*n[1] + n[2]*n[2];
01267 btScalar k = btRecipSqrt (a);
01268 p[0] = 0;
01269 p[1] = -n[2]*k;
01270 p[2] = n[1]*k;
01271
01272 q[0] = a*k;
01273 q[1] = -n[0]*p[2];
01274 q[2] = n[0]*p[1];
01275 }
01276 else {
01277
01278 btScalar a = n[0]*n[0] + n[1]*n[1];
01279 btScalar k = btRecipSqrt (a);
01280 p[0] = -n[1]*k;
01281 p[1] = n[0]*k;
01282 p[2] = 0;
01283
01284 q[0] = -n[2]*p[1];
01285 q[1] = n[2]*p[0];
01286 q[2] = a*k;
01287 }
01288 }
01289
01290
///Single-precision serialization layout for btVector3 (x, y, z, w in order).
struct btVector3FloatData
{
	float m_floats[4];
};
01295
///Double-precision serialization layout for btVector3 (x, y, z, w in order).
struct btVector3DoubleData
{
	double m_floats[4];

};
01301
01302 SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const
01303 {
01305 for (int i=0;i<4;i++)
01306 dataOut.m_floats[i] = float(m_floats[i]);
01307 }
01308
01309 SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
01310 {
01311 for (int i=0;i<4;i++)
01312 m_floats[i] = btScalar(dataIn.m_floats[i]);
01313 }
01314
01315
01316 SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const
01317 {
01319 for (int i=0;i<4;i++)
01320 dataOut.m_floats[i] = double(m_floats[i]);
01321 }
01322
01323 SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
01324 {
01325 for (int i=0;i<4;i++)
01326 m_floats[i] = btScalar(dataIn.m_floats[i]);
01327 }
01328
01329
01330 SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const
01331 {
01333 for (int i=0;i<4;i++)
01334 dataOut.m_floats[i] = m_floats[i];
01335 }
01336
01337 SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn)
01338 {
01339 for (int i=0;i<4;i++)
01340 m_floats[i] = dataIn.m_floats[i];
01341 }
01342
01343 #endif //BT_VECTOR3_H