--- /dev/null
+libovr.dll
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_Util.h\r
+ \brief This header provides LibOVR utility function declarations\r
+ \copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.\r
+ *************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_Util_h\r
+#define OVR_CAPI_Util_h\r
+\r
+#include "OVR_CAPI.h"\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+/// Enumerates modifications to the projection matrix based on the application's needs.\r
+///\r
+/// \see ovrMatrix4f_Projection\r
+///\r
+/// These values are bit flags; they may be combined with bitwise OR and\r
+/// passed to ovrMatrix4f_Projection() via its projectionModFlags parameter.\r
+typedef enum ovrProjectionModifier_ {\r
+ /// Use for generating a default projection matrix that is:\r
+ /// * Right-handed.\r
+ /// * Near depth values stored in the depth buffer are smaller than far depth values.\r
+ /// * Both near and far are explicitly defined.\r
+ /// * With a clipping range that is (0 to w).\r
+ ovrProjection_None = 0x00,\r
+\r
+ /// Enable if using left-handed transformations in your application.\r
+ ovrProjection_LeftHanded = 0x01,\r
+\r
+ /// After the projection transform is applied, far values stored in the depth buffer will be less\r
+ /// than closer depth values.\r
+ /// NOTE: Enable only if the application is using a floating-point depth buffer for proper\r
+ /// precision.\r
+ ovrProjection_FarLessThanNear = 0x02,\r
+\r
+ /// When this flag is used, the zfar value pushed into ovrMatrix4f_Projection() will be ignored\r
+ /// NOTE: Enable only if ovrProjection_FarLessThanNear is also enabled where the far clipping\r
+ /// plane will be pushed to infinity.\r
+ ovrProjection_FarClipAtInfinity = 0x04,\r
+\r
+ /// Enable if the application is rendering with OpenGL and expects a projection matrix with a\r
+ /// clipping range of (-w to w).\r
+ /// Ignore this flag if your application already handles the conversion from D3D range (0 to w) to\r
+ /// OpenGL.\r
+ ovrProjection_ClipRangeOpenGL = 0x08,\r
+} ovrProjectionModifier;\r
+\r
+/// Return values for ovr_Detect.\r
+///\r
+/// \see ovr_Detect\r
+///\r
+typedef struct OVR_ALIGNAS(8) ovrDetectResult_ {\r
+ /// Is ovrFalse when the Oculus Service is not running.\r
+ /// This means that the Oculus Service is either uninstalled or stopped.\r
+ /// IsOculusHMDConnected will be ovrFalse in this case.\r
+ /// Is ovrTrue when the Oculus Service is running.\r
+ /// This means that the Oculus Service is installed and running.\r
+ /// IsOculusHMDConnected will reflect the state of the HMD.\r
+ ovrBool IsOculusServiceRunning;\r
+\r
+ /// Is ovrFalse when an Oculus HMD is not detected.\r
+ /// If the Oculus Service is not running, this will be ovrFalse.\r
+ /// Is ovrTrue when an Oculus HMD is detected.\r
+ /// This implies that the Oculus Service is also installed and running.\r
+ ovrBool IsOculusHMDConnected;\r
+\r
+ /// Explicit padding keeps the struct at exactly 8 bytes (2 ovrBool + 6),\r
+ /// matching the OVR_ALIGNAS(8) above; enforced by the static assert below.\r
+ OVR_UNUSED_STRUCT_PAD(pad0, 6) ///< \internal struct padding\r
+\r
+} ovrDetectResult;\r
+\r
+OVR_STATIC_ASSERT(sizeof(ovrDetectResult) == 8, "ovrDetectResult size mismatch");\r
+\r
+/// Modes used to generate Touch Haptics from audio PCM buffer.\r
+///\r
+typedef enum ovrHapticsGenMode_ {\r
+ /// Point sample original signal at Haptics frequency\r
+ ovrHapticsGenMode_PointSample,\r
+ /// Number of modes; not a valid mode value itself.\r
+ ovrHapticsGenMode_Count\r
+} ovrHapticsGenMode;\r
+\r
+/// Store audio PCM data (as 32b float samples) for an audio channel.\r
+/// Note: needs to be released with ovr_ReleaseAudioChannelData to avoid memory leak.\r
+/// Typically filled in by ovr_ReadWavFromBuffer (see below), which allocates\r
+/// the buffer behind Samples.\r
+///\r
+typedef struct ovrAudioChannelData_ {\r
+ /// Samples stored as floats [-1.0f, 1.0f].\r
+ const float* Samples;\r
+\r
+ /// Number of samples\r
+ int SamplesCount;\r
+\r
+ /// Frequency (e.g. 44100)\r
+ int Frequency;\r
+} ovrAudioChannelData;\r
+\r
+/// Store a full Haptics clip, which can be used as data source for multiple ovrHapticsBuffers.\r
+/// Note: needs to be released with ovr_ReleaseHapticsClip to avoid memory leak.\r
+///\r
+typedef struct ovrHapticsClip_ {\r
+ /// Samples stored in opaque format\r
+ const void* Samples;\r
+\r
+ /// Number of samples\r
+ int SamplesCount;\r
+} ovrHapticsClip;\r
+\r
+/// Detects Oculus Runtime and Device Status\r
+///\r
+/// Checks for Oculus Runtime and Oculus HMD device status without loading the LibOVRRT\r
+/// shared library. This may be called before ovr_Initialize() to help decide whether or\r
+/// not to initialize LibOVR.\r
+///\r
+/// \param[in] timeoutMilliseconds Specifies a timeout to wait for HMD to be attached or 0 to poll.\r
+///\r
+/// \return Returns an ovrDetectResult object indicating the result of detection.\r
+///\r
+/// \see ovrDetectResult\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds);\r
+\r
+// On the Windows platform,\r
+#ifdef _WIN32\r
+/// This is the Windows Named Event name that is used to check for HMD connected state.\r
+#define OVR_HMD_CONNECTED_EVENT_NAME L"OculusHMDConnected"\r
+#endif // _WIN32\r
+\r
+/// Used to generate projection from ovrEyeDesc::Fov.\r
+///\r
+/// \param[in] fov Specifies the ovrFovPort to use.\r
+/// \param[in] znear Distance to near Z limit.\r
+/// \param[in] zfar Distance to far Z limit.\r
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.\r
+///\r
+/// \return Returns the calculated projection matrix.\r
+///\r
+/// \see ovrProjectionModifier\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)\r
+ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags);\r
+\r
+/// Extracts the required data from the result of ovrMatrix4f_Projection.\r
+///\r
+/// \param[in] projection Specifies the project matrix from which to\r
+/// extract ovrTimewarpProjectionDesc.\r
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.\r
+/// \return Returns the extracted ovrTimewarpProjectionDesc.\r
+/// \see ovrTimewarpProjectionDesc\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc)\r
+ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f projection, unsigned int projectionModFlags);\r
+\r
+/// Generates an orthographic sub-projection.\r
+///\r
+/// Used for 2D rendering, Y is down.\r
+///\r
+/// \param[in] projection The perspective matrix that the orthographic matrix is derived from.\r
+/// \param[in] orthoScale Equal to 1.0f / pixelsPerTanAngleAtCenter.\r
+/// \param[in] orthoDistance Equal to the distance from the camera in meters, such as 0.8m.\r
+/// \param[in] HmdToEyeOffsetX Specifies the offset of the eye from the center.\r
+///\r
+/// \return Returns the calculated projection matrix.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)\r
+ovrMatrix4f_OrthoSubProjection(\r
+ ovrMatrix4f projection,\r
+ ovrVector2f orthoScale,\r
+ float orthoDistance,\r
+ float HmdToEyeOffsetX);\r
+\r
+/// Computes offset eye poses based on headPose returned by ovrTrackingState.\r
+///\r
+/// \param[in] headPose Indicates the HMD position and orientation to use for the calculation.\r
+/// \param[in] hmdToEyePose Can be ovrEyeRenderDesc.HmdToEyePose returned from\r
+/// ovr_GetRenderDesc. For monoscopic rendering, use a position vector that is average\r
+/// of the two position vectors for each eyes.\r
+/// \param[out] outEyePoses If outEyePoses are used for rendering, they should be passed to\r
+/// ovr_SubmitFrame in ovrLayerEyeFov::RenderPose or ovrLayerEyeFovDepth::RenderPose.\r
+///\r
+/// NOTE(review): the #undef/#define pair below redirects source-level calls of\r
+/// ovr_CalcEyePoses to ovr_CalcEyePoses2 (which takes ovrPosef[2] rather than\r
+/// ovrVector3f[2]) while the original symbol stays declared -- presumably so\r
+/// older binaries keep linking; confirm against SDK release notes.\r
+#undef ovr_CalcEyePoses\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]);\r
+OVR_PRIVATE_FUNCTION(void)\r
+ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef HmdToEyePose[2], ovrPosef outEyePoses[2]);\r
+#define ovr_CalcEyePoses ovr_CalcEyePoses2\r
+\r
+/// Returns the predicted head pose in outHmdTrackingState and offset eye poses in outEyePoses.\r
+///\r
+/// This is a thread-safe function where caller should increment frameIndex with every frame\r
+/// and pass that index where applicable to functions called on the rendering thread.\r
+/// Assuming outEyePoses are used for rendering, it should be passed as a part of ovrLayerEyeFov.\r
+/// The caller does not need to worry about applying HmdToEyePose to the returned outEyePoses\r
+/// variables.\r
+///\r
+/// \param[in] hmd Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] frameIndex Specifies the targeted frame index, or 0 to refer to one frame after\r
+/// the last time ovr_SubmitFrame was called.\r
+/// \param[in] latencyMarker Specifies that this call is the point in time where\r
+/// the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer\r
+/// provides "SensorSampleTimestamp", that will override the value stored here.\r
+/// \param[in] hmdToEyePose Can be ovrEyeRenderDesc.HmdToEyePose returned from\r
+/// ovr_GetRenderDesc. For monoscopic rendering, use a position vector that is average\r
+/// of the two position vectors for each eyes.\r
+/// \param[out] outEyePoses The predicted eye poses.\r
+/// \param[out] outSensorSampleTime The time when this function was called. May be NULL, in which\r
+/// case it is ignored.\r
+///\r
+#undef ovr_GetEyePoses\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_GetEyePoses(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ ovrBool latencyMarker,\r
+ const ovrVector3f hmdToEyeOffset[2],\r
+ ovrPosef outEyePoses[2],\r
+ double* outSensorSampleTime);\r
+OVR_PRIVATE_FUNCTION(void)\r
+ovr_GetEyePoses2(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ ovrBool latencyMarker,\r
+ const ovrPosef HmdToEyePose[2],\r
+ ovrPosef outEyePoses[2],\r
+ double* outSensorSampleTime);\r
+#define ovr_GetEyePoses ovr_GetEyePoses2\r
+\r
+/// Tracking poses provided by the SDK come in a right-handed coordinate system. If an application\r
+/// is passing in ovrProjection_LeftHanded into ovrMatrix4f_Projection, then it should also use\r
+/// this function to flip the HMD tracking poses to be left-handed.\r
+///\r
+/// While this utility function is intended to convert a left-handed ovrPosef into a right-handed\r
+/// coordinate system, it will also work for converting right-handed to left-handed since the\r
+/// flip operation is the same for both cases.\r
+///\r
+/// \param[in] inPose that is right-handed\r
+/// \param[out] outPose that is requested to be left-handed (can be the same pointer to inPose)\r
+///\r
+OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose);\r
+\r
+/// Reads an audio channel from Wav (Waveform Audio File) data.\r
+/// Input must be a byte buffer representing a valid Wav file. Audio samples from the specified\r
+/// channel are read,\r
+/// converted to float [-1.0f, 1.0f] and returned through ovrAudioChannelData.\r
+///\r
+/// Supported formats: PCM 8b, 16b, 32b and IEEE float (little-endian only).\r
+///\r
+/// \param[out] outAudioChannel output audio channel data.\r
+/// \param[in] inputData a binary buffer representing a valid Wav file data.\r
+/// \param[in] dataSizeInBytes size of the buffer in bytes.\r
+/// \param[in] stereoChannelToUse audio channel index to extract (0 for mono).\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_ReadWavFromBuffer(\r
+ ovrAudioChannelData* outAudioChannel,\r
+ const void* inputData,\r
+ int dataSizeInBytes,\r
+ int stereoChannelToUse);\r
+\r
+/// Generates playable Touch Haptics data from an audio channel.\r
+///\r
+/// \param[out] outHapticsClip generated Haptics clip.\r
+/// \param[in] audioChannel input audio channel data.\r
+/// \param[in] genMode mode used to convert and audio channel data to Haptics data.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GenHapticsFromAudioData(\r
+ ovrHapticsClip* outHapticsClip,\r
+ const ovrAudioChannelData* audioChannel,\r
+ ovrHapticsGenMode genMode);\r
+\r
+/// Releases memory allocated for ovrAudioChannelData. Must be called to avoid memory leak.\r
+/// \param[in] audioChannel pointer to an audio channel\r
+///\r
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel);\r
+\r
+/// Releases memory allocated for ovrHapticsClip. Must be called to avoid memory leak.\r
+/// \param[in] hapticsClip pointer to a haptics clip\r
+///\r
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip);\r
+\r
+#ifdef __cplusplus\r
+} /* extern "C" */\r
+#endif\r
+\r
+#endif // Header include guard\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_Math.h\r
+ \brief Implementation of 3D primitives such as vectors, matrices.\r
+ \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+ *************************************************************************************/\r
+\r
+#ifndef OVR_Math_h\r
+#define OVR_Math_h\r
+\r
+// This file is intended to be independent of the rest of LibOVR and LibOVRKernel and thus\r
+// has no #include dependencies on either.\r
+\r
+#include <math.h>\r
+#include <stdint.h>\r
+#include <stdlib.h>\r
+#include <stdio.h>\r
+#include <string.h>\r
+#include <float.h>\r
+\r
+#ifndef OVR_EXCLUDE_CAPI_FROM_MATH\r
+#include "../OVR_CAPI.h" // Required due to a dependence on the ovrFovPort_ declaration.\r
+#endif\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning(push)\r
+#pragma warning(disable : 4127) // conditional expression is constant\r
+\r
+#if _MSC_VER < 1800 // isfinite was introduced in VS2013\r
+#define isfinite(x) _finite((x))\r
+#endif\r
+#endif\r
+\r
+#if defined(_MSC_VER)\r
+#define OVRMath_sprintf sprintf_s\r
+#else\r
+#define OVRMath_sprintf snprintf\r
+#endif\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** OVR_MATH_DEBUG_BREAK\r
+//\r
+// Independent debug break implementation for OVR_Math.h.\r
+// Expands to a hard debugger break in debug builds (MSVC __debugbreak, or\r
+// __builtin_trap elsewhere) and to a no-op ((void)0) in release builds.\r
+// (The section header previously said "OVR_MATH_ASSERT" -- copy/paste error.)\r
+\r
+#if !defined(OVR_MATH_DEBUG_BREAK)\r
+#if defined(_DEBUG)\r
+#if defined(_MSC_VER)\r
+#define OVR_MATH_DEBUG_BREAK __debugbreak()\r
+#else\r
+#define OVR_MATH_DEBUG_BREAK __builtin_trap()\r
+#endif\r
+#else\r
+#define OVR_MATH_DEBUG_BREAK ((void)0)\r
+#endif\r
+#endif\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** OVR_MATH_ASSERT\r
+//\r
+// Independent OVR_MATH_ASSERT implementation for OVR_Math.h.\r
+\r
+#if !defined(OVR_MATH_ASSERT)\r
+#if defined(_DEBUG)\r
+#define OVR_MATH_ASSERT(p) \\r
+ if (!(p)) { \\r
+ OVR_MATH_DEBUG_BREAK; \\r
+ }\r
+#else\r
+#define OVR_MATH_ASSERT(p) ((void)0)\r
+#endif\r
+#endif\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** OVR_MATH_STATIC_ASSERT\r
+//\r
+// Independent OVR_MATH_STATIC_ASSERT implementation for OVR_Math.h.\r
+// Uses C++11 static_assert when available; otherwise falls back to the\r
+// classic negative-array-size typedef trick.\r
+\r
+#if !defined(OVR_MATH_STATIC_ASSERT)\r
+// Bug fix: the original condition read `defined(_MSC_VER) >= 1600`, which\r
+// compares the 0/1 result of defined() against 1600 and is always false, so\r
+// MSVC builds never selected native static_assert. Test _MSC_VER itself\r
+// (>= 1600 means VS2010+, the first MSVC with static_assert).\r
+#if defined(__cplusplus) && \\r
+    ((defined(_MSC_VER) && (_MSC_VER >= 1600)) || defined(__GXX_EXPERIMENTAL_CXX0X__) || \\r
+    (__cplusplus >= 201103L))\r
+#define OVR_MATH_STATIC_ASSERT static_assert\r
+#else\r
+#if !defined(OVR_SA_UNUSED)\r
+#if defined(__GNUC__) || defined(__clang__)\r
+#define OVR_SA_UNUSED __attribute__((unused))\r
+#else\r
+#define OVR_SA_UNUSED\r
+#endif\r
+#define OVR_SA_PASTE(a, b) a##b\r
+#define OVR_SA_HELP(a, b) OVR_SA_PASTE(a, b)\r
+#endif\r
+\r
+// Fallback: a char array whose size is -1 (a compile error) when the\r
+// expression is false; __LINE__ pasting keeps the typedef name unique.\r
+#define OVR_MATH_STATIC_ASSERT(expression, msg) \\r
+  typedef char OVR_SA_HELP(compileTimeAssert, __LINE__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED\r
+#endif\r
+#endif\r
+\r
+namespace OVR {\r
+\r
+// Smaller of two values; on a tie the second argument is returned,\r
+// exactly as the original (a < b) ? a : b expression did.\r
+template <class T>\r
+const T OVRMath_Min(const T a, const T b) {\r
+  if (a < b)\r
+    return a;\r
+  return b;\r
+}\r
+\r
+// Larger of two values; on a tie the second argument is returned,\r
+// matching the original (b < a) ? a : b expression.\r
+template <class T>\r
+const T OVRMath_Max(const T a, const T b) {\r
+  if (b < a)\r
+    return a;\r
+  return b;\r
+}\r
+\r
+// Exchanges the contents of a and b through a temporary copy.\r
+template <class T>\r
+void OVRMath_Swap(T& a, T& b) {\r
+  const T saved = a;\r
+  a = b;\r
+  b = saved;\r
+}\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Constants for 3D world/axis definitions.\r
+\r
+// Definitions of axes for coordinate and rotation conversions.\r
+enum Axis { Axis_X = 0, Axis_Y = 1, Axis_Z = 2 };\r
+\r
+// RotateDirection describes the rotation direction around an axis, interpreted as follows:\r
+// CW - Clockwise while looking "down" from positive axis towards the origin.\r
+// CCW - Counter-clockwise while looking from the positive axis towards the origin,\r
+// which is in the negative axis direction.\r
+// CCW is the default for the RHS coordinate system. Oculus standard RHS coordinate\r
+// system defines Y up, X right, and Z back (pointing out from the screen). In this\r
+// system Rotate_CCW around Z will specifies counter-clockwise rotation in XY plane.\r
+enum RotateDirection { Rotate_CCW = 1, Rotate_CW = -1 };\r
+\r
+// Constants for right handed and left handed coordinate systems\r
+enum HandedSystem { Handed_R = 1, Handed_L = -1 };\r
+\r
+// AxisDirection describes which way the coordinate axis points. Used by WorldAxes.\r
+// The sign encodes direction; the magnitude distinguishes the three axes\r
+// (see the WorldAxes constructor assert below).\r
+enum AxisDirection {\r
+ Axis_Up = 2,\r
+ Axis_Down = -2,\r
+ Axis_Right = 1,\r
+ Axis_Left = -1,\r
+ Axis_In = 3,\r
+ Axis_Out = -3\r
+};\r
+\r
+// Maps each world axis (X/Y/Z) to a pointing direction.\r
+struct WorldAxes {\r
+ AxisDirection XAxis, YAxis, ZAxis;\r
+\r
+ // Asserts (debug only) that the three directions lie on three distinct\r
+ // coordinate axes, i.e. their magnitudes are pairwise different.\r
+ WorldAxes(AxisDirection x, AxisDirection y, AxisDirection z) : XAxis(x), YAxis(y), ZAxis(z) {\r
+ OVR_MATH_ASSERT(abs(x) != abs(y) && abs(y) != abs(z) && abs(z) != abs(x));\r
+ }\r
+};\r
+\r
+} // namespace OVR\r
+\r
+//------------------------------------------------------------------------------------//\r
+// ***** C Compatibility Types\r
+\r
+// These declarations are used to support conversion between C types used in\r
+// LibOVR C interfaces and their C++ versions. As an example, they allow passing\r
+// Vector3f into a function that expects ovrVector3f.\r
+\r
+typedef struct ovrQuatf_ ovrQuatf;\r
+typedef struct ovrQuatd_ ovrQuatd;\r
+typedef struct ovrSizei_ ovrSizei;\r
+typedef struct ovrSizef_ ovrSizef;\r
+typedef struct ovrSized_ ovrSized;\r
+typedef struct ovrRecti_ ovrRecti;\r
+typedef struct ovrVector2i_ ovrVector2i;\r
+typedef struct ovrVector2f_ ovrVector2f;\r
+typedef struct ovrVector2d_ ovrVector2d;\r
+typedef struct ovrVector3f_ ovrVector3f;\r
+typedef struct ovrVector3d_ ovrVector3d;\r
+typedef struct ovrVector4f_ ovrVector4f;\r
+typedef struct ovrVector4d_ ovrVector4d;\r
+typedef struct ovrMatrix2f_ ovrMatrix2f;\r
+typedef struct ovrMatrix2d_ ovrMatrix2d;\r
+typedef struct ovrMatrix3f_ ovrMatrix3f;\r
+typedef struct ovrMatrix3d_ ovrMatrix3d;\r
+typedef struct ovrMatrix4f_ ovrMatrix4f;\r
+typedef struct ovrMatrix4d_ ovrMatrix4d;\r
+typedef struct ovrPosef_ ovrPosef;\r
+typedef struct ovrPosed_ ovrPosed;\r
+typedef struct ovrPoseStatef_ ovrPoseStatef;\r
+typedef struct ovrPoseStated_ ovrPoseStated;\r
+typedef struct ovrFovPort_ ovrFovPort;\r
+\r
+namespace OVR {\r
+\r
+// Forward-declare our templates.\r
+template <class T>\r
+class Quat;\r
+template <class T>\r
+class Size;\r
+template <class T>\r
+class Rect;\r
+template <class T>\r
+class Vector2;\r
+template <class T>\r
+class Vector3;\r
+template <class T>\r
+class Vector4;\r
+template <class T>\r
+class Matrix2;\r
+template <class T>\r
+class Matrix3;\r
+template <class T>\r
+class Matrix4;\r
+template <class T>\r
+class Pose;\r
+template <class T>\r
+class PoseState;\r
+struct FovPort;\r
+\r
+// CompatibleTypes::Type is used to lookup a compatible C-version of a C++ class.\r
+template <class C>\r
+struct CompatibleTypes {\r
+ // Declaration here seems necessary for MSVC; specializations are\r
+ // used instead.\r
+ typedef struct {\r
+ } Type;\r
+};\r
+\r
+// Specializations providing CompatibleTypes::Type value.\r
+template <>\r
+struct CompatibleTypes<Quat<float>> {\r
+ typedef ovrQuatf Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Quat<double>> {\r
+ typedef ovrQuatd Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix2<float>> {\r
+ typedef ovrMatrix2f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix2<double>> {\r
+ typedef ovrMatrix2d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix3<float>> {\r
+ typedef ovrMatrix3f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix3<double>> {\r
+ typedef ovrMatrix3d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix4<float>> {\r
+ typedef ovrMatrix4f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Matrix4<double>> {\r
+ typedef ovrMatrix4d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Size<int>> {\r
+ typedef ovrSizei Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Size<float>> {\r
+ typedef ovrSizef Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Size<double>> {\r
+ typedef ovrSized Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Rect<int>> {\r
+ typedef ovrRecti Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector2<int>> {\r
+ typedef ovrVector2i Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector2<float>> {\r
+ typedef ovrVector2f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector2<double>> {\r
+ typedef ovrVector2d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector3<float>> {\r
+ typedef ovrVector3f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector3<double>> {\r
+ typedef ovrVector3d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector4<float>> {\r
+ typedef ovrVector4f Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Vector4<double>> {\r
+ typedef ovrVector4d Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Pose<float>> {\r
+ typedef ovrPosef Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<Pose<double>> {\r
+ typedef ovrPosed Type;\r
+};\r
+template <>\r
+struct CompatibleTypes<FovPort> {\r
+ typedef ovrFovPort Type;\r
+};\r
+\r
+//------------------------------------------------------------------------------------//\r
+// ***** Math\r
+//\r
+// Math class contains constants and functions. This class is a template specialized\r
+// per type, with Math<float> and Math<double> being distinct.\r
+// The unspecialized template serves non-floating element types (e.g. int):\r
+// it reports a zero tolerance and names float as the "other" float type.\r
+// Real constants come from the Math<float>/Math<double> specializations\r
+// later in this file.\r
+template <class T>\r
+class Math {\r
+ public:\r
+ // By default, support explicit conversion to float. This allows Vector2<int> to\r
+ // compile, for example.\r
+ typedef float OtherFloatType;\r
+\r
+ static int Tolerance() {\r
+ return 0;\r
+ } // Default value so integer types compile\r
+};\r
+\r
+//------------------------------------------------------------------------------------//\r
+// ***** double constants\r
+#define MATH_DOUBLE_PI 3.14159265358979323846\r
+#define MATH_DOUBLE_TWOPI (2 * MATH_DOUBLE_PI)\r
+#define MATH_DOUBLE_PIOVER2 (0.5 * MATH_DOUBLE_PI)\r
+#define MATH_DOUBLE_PIOVER4 (0.25 * MATH_DOUBLE_PI)\r
+#define MATH_FLOAT_MAXVALUE (FLT_MAX)\r
+\r
+#define MATH_DOUBLE_RADTODEGREEFACTOR (360.0 / MATH_DOUBLE_TWOPI)\r
+#define MATH_DOUBLE_DEGREETORADFACTOR (MATH_DOUBLE_TWOPI / 360.0)\r
+\r
+#define MATH_DOUBLE_E 2.71828182845904523536\r
+#define MATH_DOUBLE_LOG2E 1.44269504088896340736\r
+#define MATH_DOUBLE_LOG10E 0.434294481903251827651\r
+#define MATH_DOUBLE_LN2 0.693147180559945309417\r
+#define MATH_DOUBLE_LN10 2.30258509299404568402\r
+\r
+#define MATH_DOUBLE_SQRT2 1.41421356237309504880\r
+#define MATH_DOUBLE_SQRT1_2 0.707106781186547524401\r
+\r
+#define MATH_DOUBLE_TOLERANCE \\r
+ 1e-12 // a default number for value equality tolerance: about 4500*Epsilon;\r
+#define MATH_DOUBLE_SINGULARITYRADIUS \\r
+ 1e-12 // about 1-cos(.0001 degree), for gimbal lock numerical problems\r
+\r
+#define MATH_DOUBLE_HUGENUMBER 1.3407807929942596e+154\r
+#define MATH_DOUBLE_SMALLESTNONDENORMAL 2.2250738585072014e-308\r
+\r
+//------------------------------------------------------------------------------------//\r
+// ***** float constants\r
+#define MATH_FLOAT_PI float(MATH_DOUBLE_PI)\r
+#define MATH_FLOAT_TWOPI float(MATH_DOUBLE_TWOPI)\r
+#define MATH_FLOAT_PIOVER2 float(MATH_DOUBLE_PIOVER2)\r
+#define MATH_FLOAT_PIOVER4 float(MATH_DOUBLE_PIOVER4)\r
+\r
+#define MATH_FLOAT_RADTODEGREEFACTOR float(MATH_DOUBLE_RADTODEGREEFACTOR)\r
+#define MATH_FLOAT_DEGREETORADFACTOR float(MATH_DOUBLE_DEGREETORADFACTOR)\r
+\r
+#define MATH_FLOAT_E float(MATH_DOUBLE_E)\r
+#define MATH_FLOAT_LOG2E float(MATH_DOUBLE_LOG2E)\r
+#define MATH_FLOAT_LOG10E float(MATH_DOUBLE_LOG10E)\r
+#define MATH_FLOAT_LN2 float(MATH_DOUBLE_LN2)\r
+#define MATH_FLOAT_LN10 float(MATH_DOUBLE_LN10)\r
+\r
+#define MATH_FLOAT_SQRT2 float(MATH_DOUBLE_SQRT2)\r
+#define MATH_FLOAT_SQRT1_2 float(MATH_DOUBLE_SQRT1_2)\r
+\r
+#define MATH_FLOAT_TOLERANCE \\r
+ 1e-5f // a default number for value equality tolerance: 1e-5, about 84*EPSILON;\r
+#define MATH_FLOAT_SINGULARITYRADIUS \\r
+ 1e-7f // about 1-cos(.025 degree), for gimbal lock numerical problems\r
+\r
+#define MATH_FLOAT_HUGENUMBER 1.8446742974197924e+019f\r
+#define MATH_FLOAT_SMALLESTNONDENORMAL 1.1754943508222875e-038f\r
+\r
+// Single-precision Math constants class.\r
+template <>\r
+class Math<float> {\r
+ public:\r
+ typedef double OtherFloatType;\r
+\r
+ static inline float MaxValue() {\r
+ return FLT_MAX;\r
+ };\r
+ static inline float Tolerance() {\r
+ return MATH_FLOAT_TOLERANCE;\r
+ }; // a default number for value equality tolerance\r
+ static inline float SingularityRadius() {\r
+ return MATH_FLOAT_SINGULARITYRADIUS;\r
+ }; // for gimbal lock numerical problems\r
+ static inline float HugeNumber() {\r
+ return MATH_FLOAT_HUGENUMBER;\r
+ }\r
+ static inline float SmallestNonDenormal() {\r
+ return MATH_FLOAT_SMALLESTNONDENORMAL;\r
+ }\r
+};\r
+\r
+// Double-precision Math constants class\r
+template <>\r
+class Math<double> {\r
+ public:\r
+ typedef float OtherFloatType;\r
+\r
+ static inline double Tolerance() {\r
+ return MATH_DOUBLE_TOLERANCE;\r
+ }; // a default number for value equality tolerance\r
+ static inline double SingularityRadius() {\r
+ return MATH_DOUBLE_SINGULARITYRADIUS;\r
+ }; // for gimbal lock numerical problems\r
+ static inline double HugeNumber() {\r
+ return MATH_DOUBLE_HUGENUMBER;\r
+ }\r
+ static inline double SmallestNonDenormal() {\r
+ return MATH_DOUBLE_SMALLESTNONDENORMAL;\r
+ }\r
+};\r
+\r
+typedef Math<float> Mathf;\r
+typedef Math<double> Mathd;\r
+\r
+// Conversion functions between degrees and radians.\r
+// (Deliberately non-templated so that passing int arguments causes a warning.)\r
+inline float RadToDegree(float rad) {\r
+  return MATH_FLOAT_RADTODEGREEFACTOR * rad;\r
+}\r
+inline double RadToDegree(double rad) {\r
+  return MATH_DOUBLE_RADTODEGREEFACTOR * rad;\r
+}\r
+\r
+inline float DegreeToRad(float deg) {\r
+  return MATH_FLOAT_DEGREETORADFACTOR * deg;\r
+}\r
+inline double DegreeToRad(double deg) {\r
+  return MATH_DOUBLE_DEGREETORADFACTOR * deg;\r
+}\r
+\r
+// Square of x (x * x).\r
+template <class T>\r
+inline T Sqr(T x) {\r
+  const T squared = x * x;\r
+  return squared;\r
+}\r
+\r
+// MERGE_MOBILE_SDK\r
+// Safe reciprocal square root: returns 1/sqrt(f) for inputs at or above the\r
+// smallest normal value, and Math<T>::HugeNumber() for tiny/zero inputs\r
+// instead of dividing by (nearly) zero.\r
+template <class T>\r
+T RcpSqrt(const T f) {\r
+  if (f >= Math<T>::SmallestNonDenormal())\r
+    return static_cast<T>(1.0 / sqrt(f));\r
+  return Math<T>::HugeNumber();\r
+}\r
+// MERGE_MOBILE_SDK\r
+\r
+// Sign: returns 0 if x == 0, -1 if x < 0, and 1 if x > 0\r
+template <class T>\r
+inline T Sign(T x) {\r
+  if (x == T(0))\r
+    return T(0);\r
+  return (x < T(0)) ? T(-1) : T(1);\r
+}\r
+\r
+// Numerically stable acos: inputs slightly outside [-1, 1] (from rounding)\r
+// are clamped to the endpoint results instead of producing NaN.\r
+inline float Acos(float x) {\r
+  if (x > 1.0f)\r
+    return 0.0f;\r
+  if (x < -1.0f)\r
+    return MATH_FLOAT_PI;\r
+  return acosf(x);\r
+}\r
+inline double Acos(double x) {\r
+  if (x > 1.0)\r
+    return 0.0;\r
+  if (x < -1.0)\r
+    return MATH_DOUBLE_PI;\r
+  return acos(x);\r
+}\r
+\r
+// Numerically stable asin: same clamping policy as Acos above.\r
+inline float Asin(float x) {\r
+  if (x > 1.0f)\r
+    return MATH_FLOAT_PIOVER2;\r
+  if (x < -1.0f)\r
+    return -MATH_FLOAT_PIOVER2;\r
+  return asinf(x);\r
+}\r
+inline double Asin(double x) {\r
+  if (x > 1.0)\r
+    return MATH_DOUBLE_PIOVER2;\r
+  if (x < -1.0)\r
+    return -MATH_DOUBLE_PIOVER2;\r
+  return asin(x);\r
+}\r
+\r
+template <class T>\r
+class Quat;\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Vector2<>\r
+\r
+// Vector2f (Vector2d) represents a 2-dimensional vector or point in space,\r
+// consisting of coordinates x and y\r
+\r
+template <class T>\r
+class Vector2 {\r
+ public:\r
+ typedef T ElementType;\r
+ static const size_t ElementCount = 2;\r
+\r
+ T x, y;\r
+\r
+ Vector2() : x(0), y(0) {}\r
+ Vector2(T x_, T y_) : x(x_), y(y_) {}\r
+ explicit Vector2(T s) : x(s), y(s) {}\r
+ explicit Vector2(const Vector2<typename Math<T>::OtherFloatType>& src)\r
+ : x((T)src.x), y((T)src.y) {}\r
+\r
+ static Vector2 Zero() {\r
+ return Vector2(0, 0);\r
+ }\r
+\r
+ // C-interop support.\r
+ typedef typename CompatibleTypes<Vector2<T>>::Type CompatibleType;\r
+\r
+ Vector2(const CompatibleType& s) : x(s.x), y(s.y) {}\r
+\r
+ operator const CompatibleType&() const {\r
+ OVR_MATH_STATIC_ASSERT(\r
+ sizeof(Vector2<T>) == sizeof(CompatibleType), "sizeof(Vector2<T>) failure");\r
+ return reinterpret_cast<const CompatibleType&>(*this);\r
+ }\r
+\r
+ bool operator==(const Vector2& b) const {\r
+ return x == b.x && y == b.y;\r
+ }\r
+ bool operator!=(const Vector2& b) const {\r
+ return x != b.x || y != b.y;\r
+ }\r
+\r
+ Vector2 operator+(const Vector2& b) const {\r
+ return Vector2(x + b.x, y + b.y);\r
+ }\r
+ Vector2& operator+=(const Vector2& b) {\r
+ x += b.x;\r
+ y += b.y;\r
+ return *this;\r
+ }\r
+ Vector2 operator-(const Vector2& b) const {\r
+ return Vector2(x - b.x, y - b.y);\r
+ }\r
+ Vector2& operator-=(const Vector2& b) {\r
+ x -= b.x;\r
+ y -= b.y;\r
+ return *this;\r
+ }\r
+ Vector2 operator-() const {\r
+ return Vector2(-x, -y);\r
+ }\r
+\r
+ // Scalar multiplication/division scales vector.\r
+ Vector2 operator*(T s) const {\r
+ return Vector2(x * s, y * s);\r
+ }\r
+ Vector2& operator*=(T s) {\r
+ x *= s;\r
+ y *= s;\r
+ return *this;\r
+ }\r
+\r
+ Vector2 operator/(T s) const {\r
+ T rcp = T(1) / s;\r
+ return Vector2(x * rcp, y * rcp);\r
+ }\r
+ Vector2& operator/=(T s) {\r
+ T rcp = T(1) / s;\r
+ x *= rcp;\r
+ y *= rcp;\r
+ return *this;\r
+ }\r
+\r
+ static Vector2 Min(const Vector2& a, const Vector2& b) {\r
+ return Vector2((a.x < b.x) ? a.x : b.x, (a.y < b.y) ? a.y : b.y);\r
+ }\r
+ static Vector2 Max(const Vector2& a, const Vector2& b) {\r
+ return Vector2((a.x > b.x) ? a.x : b.x, (a.y > b.y) ? a.y : b.y);\r
+ }\r
+\r
+ Vector2 Clamped(T maxMag) const {\r
+ T magSquared = LengthSq();\r
+ if (magSquared <= Sqr(maxMag))\r
+ return *this;\r
+ else\r
+ return *this * (maxMag / sqrt(magSquared));\r
+ }\r
+\r
+ // Compare two vectors for equality with tolerance. Returns true if vectors match within\r
+ // tolerance.\r
+ bool IsEqual(const Vector2& b, T tolerance = Math<T>::Tolerance()) const {\r
+ return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance);\r
+ }\r
+ bool Compare(const Vector2& b, T tolerance = Math<T>::Tolerance()) const {\r
+ return IsEqual(b, tolerance);\r
+ }\r
+\r
+ // Access element by index\r
+ T& operator[](int idx) {\r
+ OVR_MATH_ASSERT(0 <= idx && idx < 2);\r
+ return *(&x + idx);\r
+ }\r
+ const T& operator[](int idx) const {\r
+ OVR_MATH_ASSERT(0 <= idx && idx < 2);\r
+ return *(&x + idx);\r
+ }\r
+\r
+ // Entry-wise product of two vectors\r
+ Vector2 EntrywiseMultiply(const Vector2& b) const {\r
+ return Vector2(x * b.x, y * b.y);\r
+ }\r
+\r
+ // Multiply and divide operators do entry-wise math. Used Dot() for dot product.\r
+ Vector2 operator*(const Vector2& b) const {\r
+ return Vector2(x * b.x, y * b.y);\r
+ }\r
+ Vector2 operator/(const Vector2& b) const {\r
+ return Vector2(x / b.x, y / b.y);\r
+ }\r
+\r
+ // Dot product\r
+ // Used to calculate angle q between two vectors among other things,\r
+ // as (A dot B) = |a||b|cos(q).\r
+ T Dot(const Vector2& b) const {\r
+ return x * b.x + y * b.y;\r
+ }\r
+\r
+ // Returns the angle from this vector to b, in radians.\r
+ T Angle(const Vector2& b) const {\r
+ T div = LengthSq() * b.LengthSq();\r
+ OVR_MATH_ASSERT(div != T(0));\r
+ T result = Acos((this->Dot(b)) / sqrt(div));\r
+ return result;\r
+ }\r
+\r
+ // Return Length of the vector squared.\r
+ T LengthSq() const {\r
+ return (x * x + y * y);\r
+ }\r
+\r
+ // Return vector length.\r
+ T Length() const {\r
+ return sqrt(LengthSq());\r
+ }\r
+\r
+ // Returns squared distance between two points represented by vectors.\r
+ T DistanceSq(const Vector2& b) const {\r
+ return (*this - b).LengthSq();\r
+ }\r
+\r
+ // Returns distance between two points represented by vectors.\r
+ T Distance(const Vector2& b) const {\r
+ return (*this - b).Length();\r
+ }\r
+\r
+ // Determine if this is a unit vector: squared length within
+ // Math<T>::Tolerance() of 1.
+ bool IsNormalized() const {
+ return fabs(LengthSq() - T(1)) < Math<T>::Tolerance();
+ }
+\r
+ // Normalize, converting vector length to 1.
+ // A zero-length vector is left unchanged... actually scaled by 0, i.e. it
+ // stays the zero vector (no divide-by-zero occurs).
+ void Normalize() {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ *this *= s;
+ }
+\r
+ // Returns normalized (unit) version of the vector without modifying itself.
+ // For a zero-length input this returns the zero vector (scale factor stays 0).
+ Vector2 Normalized() const {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ return *this * s;
+ }
+\r
+ // Linearly interpolates from this vector to another.
+ // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+ Vector2 Lerp(const Vector2& b, T f) const {
+ const T remainder = T(1) - f;
+ return *this * remainder + b * f;
+ }
+\r
+ // Projects this vector onto the argument; in other words,
+ // A.ProjectTo(B) returns projection of vector A onto B.
+ // Asserts (debug builds) when b has zero length.
+ Vector2 ProjectTo(const Vector2& b) const {
+ T l2 = b.LengthSq();
+ OVR_MATH_ASSERT(l2 != T(0));
+ return b * (Dot(b) / l2);
+ }
+\r
+ // Returns true if vector b is clockwise from this vector
+ // (the 2D cross product / perp-dot is negative).
+ bool IsClockwise(const Vector2& b) const {
+ const T perpDot = x * b.y - y * b.x;
+ return perpDot < 0;
+ }
+};\r
+\r
+// Convenience aliases for common element types. The Point2* names are
+// synonyms used where the value denotes a position rather than a displacement.
+typedef Vector2<float> Vector2f;
+typedef Vector2<double> Vector2d;
+typedef Vector2<int> Vector2i;
+
+typedef Vector2<float> Point2f;
+typedef Vector2<double> Point2d;
+typedef Vector2<int> Point2i;
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Vector3<> - 3D vector of {x, y, z}\r
+\r
+//\r
+// Vector3f (Vector3d) represents a 3-dimensional vector or point in space,\r
+// consisting of coordinates x, y and z.\r
+\r
+template <class T>
+class Vector3 {
+ public:
+ typedef T ElementType;
+ static const size_t ElementCount = 3;
+
+ T x, y, z;
+
+ // FIXME: default initialization of a vector class can be very expensive in a full-blown
+ // application. A few hundred thousand vector constructions is not unlikely and can add
+ // up to milliseconds of time on processors like the PS3 PPU.
+ Vector3() : x(0), y(0), z(0) {}
+ Vector3(T x_, T y_, T z_ = 0) : x(x_), y(y_), z(z_) {}
+ explicit Vector3(T s) : x(s), y(s), z(s) {}
+ // Converting constructor from the other float precision (float <-> double).
+ explicit Vector3(const Vector3<typename Math<T>::OtherFloatType>& src)
+ : x((T)src.x), y((T)src.y), z((T)src.z) {}
+
+ static Vector3 Zero() {
+ return Vector3(0, 0, 0);
+ }
+
+ // C-interop support.
+ typedef typename CompatibleTypes<Vector3<T>>::Type CompatibleType;
+
+ Vector3(const CompatibleType& s) : x(s.x), y(s.y), z(s.z) {}
+
+ operator const CompatibleType&() const {
+ OVR_MATH_STATIC_ASSERT(
+ sizeof(Vector3<T>) == sizeof(CompatibleType), "sizeof(Vector3<T>) failure");
+ return reinterpret_cast<const CompatibleType&>(*this);
+ }
+
+ bool operator==(const Vector3& b) const {
+ return x == b.x && y == b.y && z == b.z;
+ }
+ bool operator!=(const Vector3& b) const {
+ return x != b.x || y != b.y || z != b.z;
+ }
+
+ Vector3 operator+(const Vector3& b) const {
+ return Vector3(x + b.x, y + b.y, z + b.z);
+ }
+ Vector3& operator+=(const Vector3& b) {
+ x += b.x;
+ y += b.y;
+ z += b.z;
+ return *this;
+ }
+ Vector3 operator-(const Vector3& b) const {
+ return Vector3(x - b.x, y - b.y, z - b.z);
+ }
+ Vector3& operator-=(const Vector3& b) {
+ x -= b.x;
+ y -= b.y;
+ z -= b.z;
+ return *this;
+ }
+ Vector3 operator-() const {
+ return Vector3(-x, -y, -z);
+ }
+
+ // Scalar multiplication/division scales vector.
+ Vector3 operator*(T s) const {
+ return Vector3(x * s, y * s, z * s);
+ }
+ Vector3& operator*=(T s) {
+ x *= s;
+ y *= s;
+ z *= s;
+ return *this;
+ }
+
+ // Division multiplies by the reciprocal: one divide instead of three.
+ Vector3 operator/(T s) const {
+ T rcp = T(1) / s;
+ return Vector3(x * rcp, y * rcp, z * rcp);
+ }
+ Vector3& operator/=(T s) {
+ T rcp = T(1) / s;
+ x *= rcp;
+ y *= rcp;
+ z *= rcp;
+ return *this;
+ }
+
+ // Component-wise minimum/maximum of two vectors.
+ static Vector3 Min(const Vector3& a, const Vector3& b) {
+ return Vector3((a.x < b.x) ? a.x : b.x, (a.y < b.y) ? a.y : b.y, (a.z < b.z) ? a.z : b.z);
+ }
+ static Vector3 Max(const Vector3& a, const Vector3& b) {
+ return Vector3((a.x > b.x) ? a.x : b.x, (a.y > b.y) ? a.y : b.y, (a.z > b.z) ? a.z : b.z);
+ }
+
+ // Returns this vector with its magnitude clamped to at most maxMag.
+ Vector3 Clamped(T maxMag) const {
+ T magSquared = LengthSq();
+ if (magSquared <= Sqr(maxMag))
+ return *this;
+ else
+ return *this * (maxMag / sqrt(magSquared));
+ }
+
+ // Compare two vectors for equality with tolerance. Returns true if vectors match within
+ // tolerance.
+ bool IsEqual(const Vector3& b, T tolerance = Math<T>::Tolerance()) const {
+ return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance) &&
+ (fabs(b.z - z) <= tolerance);
+ }
+ bool Compare(const Vector3& b, T tolerance = Math<T>::Tolerance()) const {
+ return IsEqual(b, tolerance);
+ }
+
+ // Access element by index (0 = x, 1 = y, 2 = z); relies on the members being
+ // laid out contiguously, which the sizeof static asserts below guarantee.
+ T& operator[](int idx) {
+ OVR_MATH_ASSERT(0 <= idx && idx < 3);
+ return *(&x + idx);
+ }
+
+ const T& operator[](int idx) const {
+ OVR_MATH_ASSERT(0 <= idx && idx < 3);
+ return *(&x + idx);
+ }
+
+ // Entrywise product of two vectors
+ Vector3 EntrywiseMultiply(const Vector3& b) const {
+ return Vector3(x * b.x, y * b.y, z * b.z);
+ }
+
+ // Multiply and divide operators do entry-wise math
+ Vector3 operator*(const Vector3& b) const {
+ return Vector3(x * b.x, y * b.y, z * b.z);
+ }
+
+ Vector3 operator/(const Vector3& b) const {
+ return Vector3(x / b.x, y / b.y, z / b.z);
+ }
+
+ // Dot product
+ // Used to calculate angle q between two vectors among other things,
+ // as (A dot B) = |a||b|cos(q).
+ T Dot(const Vector3& b) const {
+ return x * b.x + y * b.y + z * b.z;
+ }
+
+ // Compute cross product, which generates a normal vector.
+ // Direction vector can be determined by right-hand rule: Pointing index finder in
+ // direction a and middle finger in direction b, thumb will point in a.Cross(b).
+ Vector3 Cross(const Vector3& b) const {
+ return Vector3(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x);
+ }
+
+ // Returns the angle from this vector to b, in radians.
+ // Asserts (debug builds) when either vector has zero length.
+ T Angle(const Vector3& b) const {
+ T div = LengthSq() * b.LengthSq();
+ OVR_MATH_ASSERT(div != T(0));
+ T result = Acos((this->Dot(b)) / sqrt(div));
+ return result;
+ }
+
+ // Return Length of the vector squared.
+ T LengthSq() const {
+ return (x * x + y * y + z * z);
+ }
+
+ // Return vector length.
+ T Length() const {
+ return (T)sqrt(LengthSq());
+ }
+
+ // Returns squared distance between two points represented by vectors.
+ T DistanceSq(Vector3 const& b) const {
+ return (*this - b).LengthSq();
+ }
+
+ // Returns distance between two points represented by vectors.
+ T Distance(Vector3 const& b) const {
+ return (*this - b).Length();
+ }
+
+ // Determine if this is a unit vector, within Math<T>::Tolerance().
+ bool IsNormalized() const {
+ return fabs(LengthSq() - T(1)) < Math<T>::Tolerance();
+ }
+
+ // Normalize, converting vector length to 1.
+ // The zero vector is left unchanged (no divide-by-zero occurs).
+ void Normalize() {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ *this *= s;
+ }
+
+ // Returns normalized (unit) version of the vector without modifying itself.
+ // For a zero-length input this returns the zero vector.
+ Vector3 Normalized() const {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ return *this * s;
+ }
+
+ // Linearly interpolates from this vector to another.
+ // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+ Vector3 Lerp(const Vector3& b, T f) const {
+ return *this * (T(1) - f) + b * f;
+ }
+
+ // Projects this vector onto the argument; in other words,
+ // A.ProjectTo(B) returns projection of vector A onto B.
+ // Asserts (debug builds) when b has zero length.
+ Vector3 ProjectTo(const Vector3& b) const {
+ T l2 = b.LengthSq();
+ OVR_MATH_ASSERT(l2 != T(0));
+ return b * (Dot(b) / l2);
+ }
+
+ // Projects this vector onto a plane defined by a normal vector
+ Vector3 ProjectToPlane(const Vector3& normal) const {
+ return *this - this->ProjectTo(normal);
+ }
+
+ // NOTE: both predicates test the sum x + y + z, so an infinity in any
+ // component also makes IsNan() report true / IsFinite() report false.
+ bool IsNan() const {
+ return !isfinite(x + y + z);
+ }
+ bool IsFinite() const {
+ return isfinite(x + y + z);
+ }
+};
+\r
+// Aliases plus layout checks: the static asserts guarantee the tightly-packed
+// layout that the C-interop conversion and operator[] pointer math rely on.
+typedef Vector3<float> Vector3f;
+typedef Vector3<double> Vector3d;
+typedef Vector3<int32_t> Vector3i;
+
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3f) == 3 * sizeof(float)), "sizeof(Vector3f) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3d) == 3 * sizeof(double)), "sizeof(Vector3d) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3i) == 3 * sizeof(int32_t)), "sizeof(Vector3i) failure");
+
+// Point3* are synonyms used where the value denotes a position.
+typedef Vector3<float> Point3f;
+typedef Vector3<double> Point3d;
+typedef Vector3<int32_t> Point3i;
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Vector4<> - 4D vector of {x, y, z, w}\r
+\r
+//
+// Vector4f (Vector4d) represents a 4-dimensional vector or point in space,
+// consisting of coordinates x, y, z and w.
+\r
+template <class T>
+class Vector4 {
+ public:
+ typedef T ElementType;
+ static const size_t ElementCount = 4;
+
+ T x, y, z, w;
+
+ // FIXME: default initialization of a vector class can be very expensive in a full-blown
+ // application. A few hundred thousand vector constructions is not unlikely and can add
+ // up to milliseconds of time on processors like the PS3 PPU.
+ Vector4() : x(0), y(0), z(0), w(0) {}
+ Vector4(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) {}
+ explicit Vector4(T s) : x(s), y(s), z(s), w(s) {}
+ // Promotes a Vector3 to homogeneous coordinates; w defaults to 1 (a point).
+ explicit Vector4(const Vector3<T>& v, const T w_ = T(1)) : x(v.x), y(v.y), z(v.z), w(w_) {}
+ // Converting constructor from the other float precision (float <-> double).
+ explicit Vector4(const Vector4<typename Math<T>::OtherFloatType>& src)
+ : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) {}
+
+ static Vector4 Zero() {
+ return Vector4(0, 0, 0, 0);
+ }
+
+ // C-interop support.
+ typedef typename CompatibleTypes<Vector4<T>>::Type CompatibleType;
+
+ Vector4(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {}
+
+ operator const CompatibleType&() const {
+ OVR_MATH_STATIC_ASSERT(
+ sizeof(Vector4<T>) == sizeof(CompatibleType), "sizeof(Vector4<T>) failure");
+ return reinterpret_cast<const CompatibleType&>(*this);
+ }
+
+ // Assigning a Vector3 sets w to 1 (homogeneous point semantics).
+ Vector4& operator=(const Vector3<T>& other) {
+ x = other.x;
+ y = other.y;
+ z = other.z;
+ w = 1;
+ return *this;
+ }
+ bool operator==(const Vector4& b) const {
+ return x == b.x && y == b.y && z == b.z && w == b.w;
+ }
+ bool operator!=(const Vector4& b) const {
+ return x != b.x || y != b.y || z != b.z || w != b.w;
+ }
+
+ Vector4 operator+(const Vector4& b) const {
+ return Vector4(x + b.x, y + b.y, z + b.z, w + b.w);
+ }
+ Vector4& operator+=(const Vector4& b) {
+ x += b.x;
+ y += b.y;
+ z += b.z;
+ w += b.w;
+ return *this;
+ }
+ Vector4 operator-(const Vector4& b) const {
+ return Vector4(x - b.x, y - b.y, z - b.z, w - b.w);
+ }
+ Vector4& operator-=(const Vector4& b) {
+ x -= b.x;
+ y -= b.y;
+ z -= b.z;
+ w -= b.w;
+ return *this;
+ }
+ Vector4 operator-() const {
+ return Vector4(-x, -y, -z, -w);
+ }
+
+ // Scalar multiplication/division scales vector.
+ Vector4 operator*(T s) const {
+ return Vector4(x * s, y * s, z * s, w * s);
+ }
+ Vector4& operator*=(T s) {
+ x *= s;
+ y *= s;
+ z *= s;
+ w *= s;
+ return *this;
+ }
+
+ // Division multiplies by the reciprocal: one divide instead of four.
+ Vector4 operator/(T s) const {
+ T rcp = T(1) / s;
+ return Vector4(x * rcp, y * rcp, z * rcp, w * rcp);
+ }
+ Vector4& operator/=(T s) {
+ T rcp = T(1) / s;
+ x *= rcp;
+ y *= rcp;
+ z *= rcp;
+ w *= rcp;
+ return *this;
+ }
+
+ // Component-wise minimum/maximum of two vectors.
+ static Vector4 Min(const Vector4& a, const Vector4& b) {
+ return Vector4(
+ (a.x < b.x) ? a.x : b.x,
+ (a.y < b.y) ? a.y : b.y,
+ (a.z < b.z) ? a.z : b.z,
+ (a.w < b.w) ? a.w : b.w);
+ }
+ static Vector4 Max(const Vector4& a, const Vector4& b) {
+ return Vector4(
+ (a.x > b.x) ? a.x : b.x,
+ (a.y > b.y) ? a.y : b.y,
+ (a.z > b.z) ? a.z : b.z,
+ (a.w > b.w) ? a.w : b.w);
+ }
+
+ // Returns this vector with its magnitude clamped to at most maxMag.
+ Vector4 Clamped(T maxMag) const {
+ T magSquared = LengthSq();
+ if (magSquared <= Sqr(maxMag))
+ return *this;
+ else
+ return *this * (maxMag / sqrt(magSquared));
+ }
+
+ // Compare two vectors for equality with tolerance. Returns true if vectors match within
+ // tolerance.
+ bool IsEqual(const Vector4& b, T tolerance = Math<T>::Tolerance()) const {
+ return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance) &&
+ (fabs(b.z - z) <= tolerance) && (fabs(b.w - w) <= tolerance);
+ }
+ bool Compare(const Vector4& b, T tolerance = Math<T>::Tolerance()) const {
+ return IsEqual(b, tolerance);
+ }
+
+ // Access element by index (0 = x ... 3 = w); relies on contiguous member layout.
+ T& operator[](int idx) {
+ OVR_MATH_ASSERT(0 <= idx && idx < 4);
+ return *(&x + idx);
+ }
+
+ const T& operator[](int idx) const {
+ OVR_MATH_ASSERT(0 <= idx && idx < 4);
+ return *(&x + idx);
+ }
+
+ // Entry wise product of two vectors
+ Vector4 EntrywiseMultiply(const Vector4& b) const {
+ return Vector4(x * b.x, y * b.y, z * b.z, w * b.w);
+ }
+
+ // Multiply and divide operators do entry-wise math
+ Vector4 operator*(const Vector4& b) const {
+ return Vector4(x * b.x, y * b.y, z * b.z, w * b.w);
+ }
+
+ Vector4 operator/(const Vector4& b) const {
+ return Vector4(x / b.x, y / b.y, z / b.z, w / b.w);
+ }
+
+ // Dot product
+ T Dot(const Vector4& b) const {
+ return x * b.x + y * b.y + z * b.z + w * b.w;
+ }
+
+ // Return Length of the vector squared.
+ T LengthSq() const {
+ return (x * x + y * y + z * z + w * w);
+ }
+
+ // Return vector length.
+ T Length() const {
+ return sqrt(LengthSq());
+ }
+
+ // Determine if this is a unit vector, within Math<T>::Tolerance().
+ bool IsNormalized() const {
+ return fabs(LengthSq() - T(1)) < Math<T>::Tolerance();
+ }
+
+ // Normalize, converting vector length to 1.
+ // The zero vector is left unchanged (no divide-by-zero occurs).
+ void Normalize() {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ *this *= s;
+ }
+
+ // Returns normalized (unit) version of the vector without modifying itself.
+ // For a zero-length input this returns the zero vector.
+ Vector4 Normalized() const {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ return *this * s;
+ }
+
+ // Linearly interpolates from this vector to another.
+ // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+ Vector4 Lerp(const Vector4& b, T f) const {
+ return *this * (T(1) - f) + b * f;
+ }
+};
+\r
+// Convenience aliases for common element types.
+typedef Vector4<float> Vector4f;
+typedef Vector4<double> Vector4d;
+typedef Vector4<int> Vector4i;
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Bounds3\r
+\r
+// Bounds class used to describe a 3D axis aligned bounding box.\r
+\r
+template <class T>
+class Bounds3 {
+ public:
+ // b[0] holds the per-axis minimums, b[1] the per-axis maximums.
+ Vector3<T> b[2];
+
+ Bounds3() {
+ Clear();
+ }
+
+ Bounds3(const Vector3<T>& mins, const Vector3<T>& maxs) {
+ b[0] = mins;
+ b[1] = maxs;
+ }
+
+ // Resets to an inverted (empty) box: mins at +MaxValue, maxs at -MaxValue,
+ // so the first AddPoint() initializes both bounds.
+ void Clear() {
+ b[0].x = b[0].y = b[0].z = Math<T>::MaxValue();
+ b[1].x = b[1].y = b[1].z = -Math<T>::MaxValue();
+ }
+
+ // Grows the box (if needed) to contain point v.
+ void AddPoint(const Vector3<T>& v) {
+ b[0].x = (b[0].x < v.x ? b[0].x : v.x);
+ b[0].y = (b[0].y < v.y ? b[0].y : v.y);
+ b[0].z = (b[0].z < v.z ? b[0].z : v.z);
+ b[1].x = (v.x < b[1].x ? b[1].x : v.x);
+ b[1].y = (v.y < b[1].y ? b[1].y : v.y);
+ b[1].z = (v.z < b[1].z ? b[1].z : v.z);
+ }
+
+ // True when point v lies strictly outside the box on any axis.
+ bool Excludes(const Vector3<T>& v) const {
+ bool testing = false;
+ for (int32_t t = 0; t < 3; ++t) {
+ testing |= v[t] > b[1][t];
+ testing |= v[t] < b[0][t];
+ }
+ return testing;
+ }
+
+ // excludes, ignoring the vertical (y) axis
+ bool ExcludesXZ(const Vector3<T>& v) const {
+ bool testing = false;
+ testing |= v[0] > b[1][0];
+ testing |= v[0] < b[0][0];
+ testing |= v[2] > b[1][2];
+ testing |= v[2] < b[0][2];
+ return testing;
+ }
+
+ // True when the two boxes do not overlap on some axis.
+ bool Excludes(const Bounds3<T>& bounds) const {
+ bool testing = false;
+ for (int32_t t = 0; t < 3; ++t) {
+ testing |= bounds.b[0][t] > b[1][t];
+ testing |= bounds.b[1][t] < b[0][t];
+ }
+ return testing;
+ }
+
+ const Vector3<T>& GetMins() const {
+ return b[0];
+ }
+ const Vector3<T>& GetMaxs() const {
+ return b[1];
+ }
+
+ Vector3<T>& GetMins() {
+ return b[0];
+ }
+ Vector3<T>& GetMaxs() {
+ return b[1];
+ }
+};
+\r
+// Convenience aliases for common element types.
+typedef Bounds3<float> Bounds3f;
+typedef Bounds3<double> Bounds3d;
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Size\r
+\r
+// Size class represents 2D size with Width, Height components.
+// Used to describe dimensions of render targets, etc.
+\r
+template <class T>
+class Size {
+ public:
+ T w, h;
+
+ Size() : w(0), h(0) {}
+ Size(T w_, T h_) : w(w_), h(h_) {}
+ explicit Size(T s) : w(s), h(s) {}
+ // Converting constructor from the other float precision (float <-> double).
+ explicit Size(const Size<typename Math<T>::OtherFloatType>& src) : w((T)src.w), h((T)src.h) {}
+
+ // C-interop support.
+ typedef typename CompatibleTypes<Size<T>>::Type CompatibleType;
+
+ Size(const CompatibleType& s) : w(s.w), h(s.h) {}
+
+ operator const CompatibleType&() const {
+ OVR_MATH_STATIC_ASSERT(sizeof(Size<T>) == sizeof(CompatibleType), "sizeof(Size<T>) failure");
+ return reinterpret_cast<const CompatibleType&>(*this);
+ }
+
+ bool operator==(const Size& b) const {
+ return w == b.w && h == b.h;
+ }
+ bool operator!=(const Size& b) const {
+ return w != b.w || h != b.h;
+ }
+
+ // Arithmetic operators are entry-wise on (w, h).
+ Size operator+(const Size& b) const {
+ return Size(w + b.w, h + b.h);
+ }
+ Size& operator+=(const Size& b) {
+ w += b.w;
+ h += b.h;
+ return *this;
+ }
+ Size operator-(const Size& b) const {
+ return Size(w - b.w, h - b.h);
+ }
+ Size& operator-=(const Size& b) {
+ w -= b.w;
+ h -= b.h;
+ return *this;
+ }
+ Size operator-() const {
+ return Size(-w, -h);
+ }
+ Size operator*(const Size& b) const {
+ return Size(w * b.w, h * b.h);
+ }
+ Size& operator*=(const Size& b) {
+ w *= b.w;
+ h *= b.h;
+ return *this;
+ }
+ Size operator/(const Size& b) const {
+ return Size(w / b.w, h / b.h);
+ }
+ Size& operator/=(const Size& b) {
+ w /= b.w;
+ h /= b.h;
+ return *this;
+ }
+
+ // Scalar multiplication/division scales both components.
+ Size operator*(T s) const {
+ return Size(w * s, h * s);
+ }
+ Size& operator*=(T s) {
+ w *= s;
+ h *= s;
+ return *this;
+ }
+ Size operator/(T s) const {
+ return Size(w / s, h / s);
+ }
+ Size& operator/=(T s) {
+ w /= s;
+ h /= s;
+ return *this;
+ }
+
+ // Component-wise minimum/maximum of two sizes.
+ static Size Min(const Size& a, const Size& b) {
+ return Size((a.w < b.w) ? a.w : b.w, (a.h < b.h) ? a.h : b.h);
+ }
+ static Size Max(const Size& a, const Size& b) {
+ return Size((a.w > b.w) ? a.w : b.w, (a.h > b.h) ? a.h : b.h);
+ }
+
+ // Width * height; for integral T this can overflow on large sizes.
+ T Area() const {
+ return w * h;
+ }
+
+ // Reinterpret (w, h) as a Vector2 (x = w, y = h).
+ inline Vector2<T> ToVector() const {
+ return Vector2<T>(w, h);
+ }
+};
+\r
+// Convenience aliases for common element types.
+typedef Size<int> Sizei;
+typedef Size<unsigned> Sizeu;
+typedef Size<float> Sizef;
+typedef Size<double> Sized;
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Rect\r
+\r
+// Rect describes a rectangular area for rendering, that includes position and size.\r
+template <class T>
+class Rect {
+ public:
+ // Top-left position (x, y) and extent (w, h).
+ T x, y;
+ T w, h;
+
+ // NOTE: the default constructor intentionally leaves all members
+ // uninitialized (POD-style usage); callers must assign before reading.
+ Rect() {}
+ Rect(T x1, T y1, T w1, T h1) : x(x1), y(y1), w(w1), h(h1) {}
+ Rect(const Vector2<T>& pos, const Size<T>& sz) : x(pos.x), y(pos.y), w(sz.w), h(sz.h) {}
+ Rect(const Size<T>& sz) : x(0), y(0), w(sz.w), h(sz.h) {}
+
+ // C-interop support.
+ typedef typename CompatibleTypes<Rect<T>>::Type CompatibleType;
+
+ Rect(const CompatibleType& s) : x(s.Pos.x), y(s.Pos.y), w(s.Size.w), h(s.Size.h) {}
+
+ operator const CompatibleType&() const {
+ OVR_MATH_STATIC_ASSERT(sizeof(Rect<T>) == sizeof(CompatibleType), "sizeof(Rect<T>) failure");
+ return reinterpret_cast<const CompatibleType&>(*this);
+ }
+
+ Vector2<T> GetPos() const {
+ return Vector2<T>(x, y);
+ }
+ Size<T> GetSize() const {
+ return Size<T>(w, h);
+ }
+ void SetPos(const Vector2<T>& pos) {
+ x = pos.x;
+ y = pos.y;
+ }
+ void SetSize(const Size<T>& sz) {
+ w = sz.w;
+ h = sz.h;
+ }
+
+ bool operator==(const Rect& vp) const {
+ return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h);
+ }
+ bool operator!=(const Rect& vp) const {
+ return !operator==(vp);
+ }
+};
+\r
+typedef Rect<int> Recti;\r
+\r
+//-------------------------------------------------------------------------------------//\r
+// ***** Quat\r
+//\r
+// Quatf represents a quaternion class used for rotations.\r
+//\r
+// Quaternion multiplications are done in right-to-left order, to match the\r
+// behavior of matrices.\r
+\r
+template <class T>\r
+class Quat {\r
+ public:\r
+ typedef T ElementType;\r
+ static const size_t ElementCount = 4;\r
+\r
+ // x,y,z = axis*sin(angle), w = cos(angle)\r
+ T x, y, z, w;\r
+\r
+ Quat() : x(0), y(0), z(0), w(1) {}\r
+ Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) {}\r
+ explicit Quat(const Quat<typename Math<T>::OtherFloatType>& src)\r
+ : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) {\r
+ // NOTE: Converting a normalized Quat<float> to Quat<double>\r
+ // will generally result in an un-normalized quaternion.\r
+ // But we don't normalize here in case the quaternion\r
+ // being converted is not a normalized rotation quaternion.\r
+ }\r
+\r
+ typedef typename CompatibleTypes<Quat<T>>::Type CompatibleType;\r
+\r
+ // C-interop support.\r
+ Quat(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {}\r
+\r
+ operator CompatibleType() const {\r
+ CompatibleType result;\r
+ result.x = x;\r
+ result.y = y;\r
+ result.z = z;\r
+ result.w = w;\r
+ return result;\r
+ }\r
+\r
+ // Constructs quaternion for rotation around the axis by an angle.\r
+ Quat(const Vector3<T>& axis, T angle) {\r
+ // Make sure we don't divide by zero.\r
+ if (axis.LengthSq() == T(0)) {\r
+ // Assert if the axis is zero, but the angle isn't\r
+ OVR_MATH_ASSERT(angle == T(0));\r
+ x = y = z = T(0);\r
+ w = T(1);\r
+ return;\r
+ }\r
+\r
+ Vector3<T> unitAxis = axis.Normalized();\r
+ T sinHalfAngle = sin(angle * T(0.5));\r
+\r
+ w = cos(angle * T(0.5));\r
+ x = unitAxis.x * sinHalfAngle;\r
+ y = unitAxis.y * sinHalfAngle;\r
+ z = unitAxis.z * sinHalfAngle;\r
+ }\r
+\r
+ // Constructs quaternion for rotation around one of the coordinate axis by an angle.\r
+ Quat(Axis A, T angle, RotateDirection d = Rotate_CCW, HandedSystem s = Handed_R) {\r
+ T sinHalfAngle = s * d * sin(angle * T(0.5));\r
+ T v[3];\r
+ v[0] = v[1] = v[2] = T(0);\r
+ v[A] = sinHalfAngle;\r
+\r
+ w = cos(angle * T(0.5));\r
+ x = v[0];\r
+ y = v[1];\r
+ z = v[2];\r
+ }\r
+\r
+ Quat operator-() {\r
+ return Quat(-x, -y, -z, -w);\r
+ } // unary minus\r
+\r
+ static Quat Identity() {\r
+ return Quat(0, 0, 0, 1);\r
+ }\r
+\r
+ // Compute axis and angle from quaternion\r
+ void GetAxisAngle(Vector3<T>* axis, T* angle) const {\r
+ if (x * x + y * y + z * z > Math<T>::Tolerance() * Math<T>::Tolerance()) {\r
+ *axis = Vector3<T>(x, y, z).Normalized();\r
+ *angle = 2 * Acos(w);\r
+ if (*angle > ((T)MATH_DOUBLE_PI)) // Reduce the magnitude of the angle, if necessary\r
+ {\r
+ *angle = ((T)MATH_DOUBLE_TWOPI) - *angle;\r
+ *axis = *axis * (-1);\r
+ }\r
+ } else {\r
+ *axis = Vector3<T>(1, 0, 0);\r
+ *angle = T(0);\r
+ }\r
+ }\r
+\r
+ // Convert a quaternion to a rotation vector, also known as\r
+ // Rodrigues vector, AxisAngle vector, SORA vector, exponential map.\r
+ // A rotation vector describes a rotation about an axis:\r
+ // the axis of rotation is the vector normalized,\r
+ // the angle of rotation is the magnitude of the vector.\r
+ Vector3<T> ToRotationVector() const {\r
+ // OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug\r
+ T s = T(0);\r
+ T sinHalfAngle = sqrt(x * x + y * y + z * z);\r
+ if (sinHalfAngle > T(0)) {\r
+ T cosHalfAngle = w;\r
+ T halfAngle = atan2(sinHalfAngle, cosHalfAngle);\r
+\r
+ // Ensure minimum rotation magnitude\r
+ if (cosHalfAngle < 0)\r
+ halfAngle -= T(MATH_DOUBLE_PI);\r
+\r
+ s = T(2) * halfAngle / sinHalfAngle;\r
+ }\r
+ return Vector3<T>(x * s, y * s, z * s);\r
+ }\r
+\r
+ // Faster version of the above, optimized for use with small rotations, where rotation angle ~=\r
+ // sin(angle)\r
+ inline OVR::Vector3<T> FastToRotationVector() const {\r
+ OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug\r
+ T s;\r
+ T sinHalfSquared = x * x + y * y + z * z;\r
+ if (sinHalfSquared < T(.0037)) // =~ sin(7/2 degrees)^2\r
+ {\r
+ // Max rotation magnitude error is about .062% at 7 degrees rotation, or about .0043 degrees\r
+ s = T(2) * Sign(w);\r
+ } else {\r
+ T sinHalfAngle = sqrt(sinHalfSquared);\r
+ T cosHalfAngle = w;\r
+ T halfAngle = atan2(sinHalfAngle, cosHalfAngle);\r
+\r
+ // Ensure minimum rotation magnitude\r
+ if (cosHalfAngle < 0)\r
+ halfAngle -= T(MATH_DOUBLE_PI);\r
+\r
+ s = T(2) * halfAngle / sinHalfAngle;\r
+ }\r
+ return Vector3<T>(x * s, y * s, z * s);\r
+ }\r
+\r
+ // Given a rotation vector of form unitRotationAxis * angle,\r
+ // returns the equivalent quaternion (unitRotationAxis * sin(angle), cos(Angle)).\r
+ static Quat FromRotationVector(const Vector3<T>& v) {\r
+ T angleSquared = v.LengthSq();\r
+ T s = T(0);\r
+ T c = T(1);\r
+ if (angleSquared > T(0)) {\r
+ T angle = sqrt(angleSquared);\r
+ s = sin(angle * T(0.5)) / angle; // normalize\r
+ c = cos(angle * T(0.5));\r
+ }\r
+ return Quat(s * v.x, s * v.y, s * v.z, c);\r
+ }\r
+\r
+ // Faster version of above, optimized for use with small rotation magnitudes, where rotation angle\r
+ // =~ sin(angle).\r
+ // If normalize is false, small-angle quaternions are returned un-normalized.\r
+ inline static Quat FastFromRotationVector(const OVR::Vector3<T>& v, bool normalize = true) {\r
+ T s, c;\r
+ T angleSquared = v.LengthSq();\r
+ if (angleSquared < T(0.0076)) // =~ (5 degrees*pi/180)^2\r
+ {\r
+ s = T(0.5);\r
+ c = T(1.0);\r
+ // Max rotation magnitude error (after normalization) is about .064% at 5 degrees rotation, or\r
+ // .0032 degrees\r
+ if (normalize && angleSquared > 0) {\r
+ // sin(angle/2)^2 ~= (angle/2)^2 and cos(angle/2)^2 ~= 1\r
+ T invLen = T(1) / sqrt(angleSquared * T(0.25) + T(1)); // normalize\r
+ s = s * invLen;\r
+ c = c * invLen;\r
+ }\r
+ } else {\r
+ T angle = sqrt(angleSquared);\r
+ s = sin(angle * T(0.5)) / angle;\r
+ c = cos(angle * T(0.5));\r
+ }\r
+ return Quat(s * v.x, s * v.y, s * v.z, c);\r
+ }\r
+\r
+ // Constructs the quaternion from a rotation matrix\r
+ explicit Quat(const Matrix4<T>& m) {\r
+ T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];\r
+\r
+ // In almost all cases, the first part is executed.\r
+ // However, if the trace is not positive, the other\r
+ // cases arise.\r
+ if (trace > T(0)) {\r
+ T s = sqrt(trace + T(1)) * T(2); // s=4*qw\r
+ w = T(0.25) * s;\r
+ x = (m.M[2][1] - m.M[1][2]) / s;\r
+ y = (m.M[0][2] - m.M[2][0]) / s;\r
+ z = (m.M[1][0] - m.M[0][1]) / s;\r
+ } else if ((m.M[0][0] > m.M[1][1]) && (m.M[0][0] > m.M[2][2])) {\r
+ T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);\r
+ w = (m.M[2][1] - m.M[1][2]) / s;\r
+ x = T(0.25) * s;\r
+ y = (m.M[0][1] + m.M[1][0]) / s;\r
+ z = (m.M[2][0] + m.M[0][2]) / s;\r
+ } else if (m.M[1][1] > m.M[2][2]) {\r
+ T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy\r
+ w = (m.M[0][2] - m.M[2][0]) / s;\r
+ x = (m.M[0][1] + m.M[1][0]) / s;\r
+ y = T(0.25) * s;\r
+ z = (m.M[1][2] + m.M[2][1]) / s;\r
+ } else {\r
+ T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz\r
+ w = (m.M[1][0] - m.M[0][1]) / s;\r
+ x = (m.M[0][2] + m.M[2][0]) / s;\r
+ y = (m.M[1][2] + m.M[2][1]) / s;\r
+ z = T(0.25) * s;\r
+ }\r
+ OVR_MATH_ASSERT(IsNormalized()); // Ensure input matrix is orthogonal\r
+ }\r
+\r
+ // Constructs the quaternion from a rotation matrix\r
+ explicit Quat(const Matrix3<T>& m) {\r
+ T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];\r
+\r
+ // In almost all cases, the first part is executed.\r
+ // However, if the trace is not positive, the other\r
+ // cases arise.\r
+ if (trace > T(0)) {\r
+ T s = sqrt(trace + T(1)) * T(2); // s=4*qw\r
+ w = T(0.25) * s;\r
+ x = (m.M[2][1] - m.M[1][2]) / s;\r
+ y = (m.M[0][2] - m.M[2][0]) / s;\r
+ z = (m.M[1][0] - m.M[0][1]) / s;\r
+ } else if ((m.M[0][0] > m.M[1][1]) && (m.M[0][0] > m.M[2][2])) {\r
+ T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);\r
+ w = (m.M[2][1] - m.M[1][2]) / s;\r
+ x = T(0.25) * s;\r
+ y = (m.M[0][1] + m.M[1][0]) / s;\r
+ z = (m.M[2][0] + m.M[0][2]) / s;\r
+ } else if (m.M[1][1] > m.M[2][2]) {\r
+ T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy\r
+ w = (m.M[0][2] - m.M[2][0]) / s;\r
+ x = (m.M[0][1] + m.M[1][0]) / s;\r
+ y = T(0.25) * s;\r
+ z = (m.M[1][2] + m.M[2][1]) / s;\r
+ } else {\r
+ T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz\r
+ w = (m.M[1][0] - m.M[0][1]) / s;\r
+ x = (m.M[0][2] + m.M[2][0]) / s;\r
+ y = (m.M[1][2] + m.M[2][1]) / s;\r
+ z = T(0.25) * s;\r
+ }\r
+ OVR_MATH_ASSERT(IsNormalized()); // Ensure input matrix is orthogonal\r
+ }\r
+\r
+ // MERGE_MOBILE_SDK\r
+ // Constructs a quaternion that rotates 'from' to line up with 'to'.\r
+ explicit Quat(const Vector3<T>& from, const Vector3<T>& to) {\r
+ const T cx = from.y * to.z - from.z * to.y;\r
+ const T cy = from.z * to.x - from.x * to.z;\r
+ const T cz = from.x * to.y - from.y * to.x;\r
+ const T dot = from.x * to.x + from.y * to.y + from.z * to.z;\r
+ const T crossLengthSq = cx * cx + cy * cy + cz * cz;\r
+ const T magnitude = static_cast<T>(sqrt(crossLengthSq + dot * dot));\r
+ const T cw = dot + magnitude;\r
+ if (cw < Math<T>::SmallestNonDenormal()) {\r
+ const T sx = to.y * to.y + to.z * to.z;\r
+ const T sz = to.x * to.x + to.y * to.y;\r
+ if (sx > sz) {\r
+ const T rcpLength = RcpSqrt(sx);\r
+ x = T(0);\r
+ y = to.z * rcpLength;\r
+ z = -to.y * rcpLength;\r
+ w = T(0);\r
+ } else {\r
+ const T rcpLength = RcpSqrt(sz);\r
+ x = to.y * rcpLength;\r
+ y = -to.x * rcpLength;\r
+ z = T(0);\r
+ w = T(0);\r
+ }\r
+ return;\r
+ }\r
+ const T rcpLength = RcpSqrt(crossLengthSq + cw * cw);\r
+ x = cx * rcpLength;\r
+ y = cy * rcpLength;\r
+ z = cz * rcpLength;\r
+ w = cw * rcpLength;\r
+ }\r
+ // MERGE_MOBILE_SDK\r
+\r
+ bool operator==(const Quat& b) const {\r
+ return x == b.x && y == b.y && z == b.z && w == b.w;\r
+ }\r
+ bool operator!=(const Quat& b) const {\r
+ return x != b.x || y != b.y || z != b.z || w != b.w;\r
+ }\r
+\r
+ Quat operator+(const Quat& b) const {\r
+ return Quat(x + b.x, y + b.y, z + b.z, w + b.w);\r
+ }\r
+ Quat& operator+=(const Quat& b) {\r
+ w += b.w;\r
+ x += b.x;\r
+ y += b.y;\r
+ z += b.z;\r
+ return *this;\r
+ }\r
+ Quat operator-(const Quat& b) const {\r
+ return Quat(x - b.x, y - b.y, z - b.z, w - b.w);\r
+ }\r
+ Quat& operator-=(const Quat& b) {\r
+ w -= b.w;\r
+ x -= b.x;\r
+ y -= b.y;\r
+ z -= b.z;\r
+ return *this;\r
+ }\r
+\r
+ Quat operator*(T s) const {\r
+ return Quat(x * s, y * s, z * s, w * s);\r
+ }\r
+ Quat& operator*=(T s) {\r
+ w *= s;\r
+ x *= s;\r
+ y *= s;\r
+ z *= s;\r
+ return *this;\r
+ }\r
+ Quat operator/(T s) const {\r
+ T rcp = T(1) / s;\r
+ return Quat(x * rcp, y * rcp, z * rcp, w * rcp);\r
+ }\r
+ Quat& operator/=(T s) {\r
+ T rcp = T(1) / s;\r
+ w *= rcp;\r
+ x *= rcp;\r
+ y *= rcp;\r
+ z *= rcp;\r
+ return *this;\r
+ }\r
+\r
+ // MERGE_MOBILE_SDK\r
+ Vector3<T> operator*(const Vector3<T>& v) const {\r
+ return Rotate(v);\r
+ }\r
+ // MERGE_MOBILE_SDK\r
+\r
+ // Compare two quats for equality within tolerance. Returns true if quats match within tolerance.
+ // Uses |dot| so q and -q (the same rotation) compare equal.
+ bool IsEqual(const Quat& b, T tolerance = Math<T>::Tolerance()) const {
+ return Abs(Dot(b)) >= T(1) - tolerance;
+ }
+
+ // Compare two quats for equality within tolerance while checking matching hemispheres. Returns
+ // true if quats match within tolerance.
+ bool IsEqualMatchHemisphere(Quat b, T tolerance = Math<T>::Tolerance()) const {
+ b.EnsureSameHemisphere(*this);
+ return Abs(Dot(b)) >= T(1) - tolerance;
+ }
+
+ // Branchless-friendly absolute value helper for T.
+ static T Abs(const T v) {
+ return (v >= 0) ? v : -v;
+ }
+
+ // Get Imaginary part vector
+ Vector3<T> Imag() const {
+ return Vector3<T>(x, y, z);
+ }
+
+ // Get quaternion length.
+ T Length() const {
+ return sqrt(LengthSq());
+ }
+
+ // Get quaternion length squared.
+ T LengthSq() const {
+ return (x * x + y * y + z * z + w * w);
+ }
+
+ // Simple Euclidean distance in R^4 (not SLERP distance, but at least respects Haar measure)
+ T Distance(const Quat& q) const {
+ T d1 = (*this - q).Length();
+ T d2 = (*this + q).Length(); // Antipodal point check
+ return (d1 < d2) ? d1 : d2;
+ }
+
+ // Squared version of Distance(); avoids the two square roots.
+ T DistanceSq(const Quat& q) const {
+ T d1 = (*this - q).LengthSq();
+ T d2 = (*this + q).LengthSq(); // Antipodal point check
+ return (d1 < d2) ? d1 : d2;
+ }
+
+ // 4D dot product; for unit quaternions this is the cosine of half the angle between them.
+ T Dot(const Quat& q) const {
+ return x * q.x + y * q.y + z * q.z + w * q.w;
+ }
+
+ // Angle between two quaternions in radians
+ T Angle(const Quat& q) const {
+ return T(2) * Acos(Abs(Dot(q)));
+ }
+
+ // Angle of quaternion
+ T Angle() const {
+ return T(2) * Acos(Abs(w));
+ }
+\r
+ // Normalize
+ bool IsNormalized() const {
+ return fabs(LengthSq() - T(1)) < Math<T>::Tolerance();
+ }
+
+ // Scale to unit length in place. A zero-length quaternion stays zero
+ // (s remains 0 and multiplying all-zero components is a no-op).
+ void Normalize() {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ *this *= s;
+ }
+
+ // Returns a unit-length copy; a zero-length quaternion yields zero.
+ Quat Normalized() const {
+ T s = Length();
+ if (s != T(0))
+ s = T(1) / s;
+ return *this * s;
+ }
+
+ // Flip sign if needed so this lies in the same 4D hemisphere as o
+ // (Dot(o) >= 0); either sign represents the same rotation.
+ inline void EnsureSameHemisphere(const Quat& o) {
+ if (Dot(o) < T(0)) {
+ x = -x;
+ y = -y;
+ z = -z;
+ w = -w;
+ }
+ }
+
+ // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized.
+ Quat Conj() const {
+ return Quat(-x, -y, -z, w);
+ }
+
+ // Quaternion multiplication. Combines quaternion rotations, performing the one on the
+ // right hand side first.
+ Quat operator*(const Quat& b) const {
+ return Quat(
+ w * b.x + x * b.w + y * b.z - z * b.y,
+ w * b.y - x * b.z + y * b.w + z * b.x,
+ w * b.z + x * b.y - y * b.x + z * b.w,
+ w * b.w - x * b.x - y * b.y - z * b.z);
+ }
+ const Quat& operator*=(const Quat& b) {
+ *this = *this * b;
+ return *this;
+ }
+
+ //
+ // this^p normalized; same as rotating by this p times.
+ // Implemented by scaling the angle of the axis-angle form by p.
+ Quat PowNormalized(T p) const {
+ Vector3<T> v;
+ T a;
+ GetAxisAngle(&v, &a);
+ return Quat(v, a * p);
+ }
+\r
+ // Compute quaternion that rotates v into alignTo: alignTo = Quat::Align(alignTo, v).Rotate(v).
+ // NOTE: alignTo and v must be normalized.
+ // Uses the half-angle bisector: the rotation has axis v x bisector and
+ // w = cos(halfAngle) = v . bisector, so no trig calls are needed.
+ static Quat Align(const Vector3<T>& alignTo, const Vector3<T>& v) {
+ OVR_MATH_ASSERT(alignTo.IsNormalized() && v.IsNormalized());
+ Vector3<T> bisector = (v + alignTo);
+ bisector.Normalize();
+ T cosHalfAngle = v.Dot(bisector); // 0..1
+ if (cosHalfAngle > T(0)) {
+ Vector3<T> imag = v.Cross(bisector);
+ return Quat(imag.x, imag.y, imag.z, cosHalfAngle);
+ } else {
+ // cosHalfAngle == 0: a 180 degree rotation.
+ // sinHalfAngle == 1, rotation axis is any axis perpendicular
+ // to alignTo. Choose axis to include largest magnitude components
+ if (fabs(v.x) > fabs(v.y)) {
+ // x or z is max magnitude component
+ // = Cross(v, (0,1,0)).Normalized();
+ T invLen = sqrt(v.x * v.x + v.z * v.z);
+ if (invLen > T(0))
+ invLen = T(1) / invLen;
+ return Quat(-v.z * invLen, 0, v.x * invLen, 0);
+ } else {
+ // y or z is max magnitude component
+ // = Cross(v, (1,0,0)).Normalized();
+ T invLen = sqrt(v.y * v.y + v.z * v.z);
+ if (invLen > T(0))
+ invLen = T(1) / invLen;
+ return Quat(0, v.z * invLen, -v.y * invLen, 0);
+ }
+ }
+ }
+
+ // Decompose a quat into quat = swing * twist, where twist is a rotation about axis,
+ // and swing is a rotation perpendicular to axis.
+ Quat GetSwingTwist(const Vector3<T>& axis, Quat* twist) const {
+ OVR_MATH_ASSERT(twist);
+ OVR_MATH_ASSERT(axis.IsNormalized());
+
+ // Create a normalized quaternion from projection of (x,y,z) onto axis
+ T d = axis.Dot(Vector3<T>(x, y, z));
+ *twist = Quat(axis.x * d, axis.y * d, axis.z * d, w);
+ T len = twist->Length();
+ if (len == 0)
+ twist->w = T(1); // identity
+ else
+ *twist /= len; // normalize
+
+ // swing = quat * twist^-1, so that swing * twist == quat
+ return *this * twist->Inverted();
+ }
+\r
+ // Normalized linear interpolation of quaternions
+ // NOTE: This function is a bad approximation of Slerp()
+ // when the angle between the *this and b is large.
+ // Use FastSlerp() or Slerp() instead.
+ // The sign flip on s keeps the interpolation on the shorter arc.
+ Quat Lerp(const Quat& b, T s) const {
+ return (*this * (T(1) - s) + b * (Dot(b) < 0 ? -s : s)).Normalized();
+ }
+
+ // Spherical linear interpolation between rotations
+ // Computed as exp(s * log(b * this^-1)) * this.
+ Quat Slerp(const Quat& b, T s) const {
+ Vector3<T> delta = (b * this->Inverted()).ToRotationVector();
+ return (FromRotationVector(delta * s) * *this)
+ .Normalized(); // normalize so errors don't accumulate
+ }
+
+ // Spherical linear interpolation: much faster for small rotations, accurate for large rotations.
+ // See FastTo/FromRotationVector
+ Quat FastSlerp(const Quat& b, T s) const {
+ Vector3<T> delta = (b * this->Inverted()).FastToRotationVector();
+ return (FastFromRotationVector(delta * s, false) * *this).Normalized();
+ }
+
+ // MERGE_MOBILE_SDK
+ // FIXME: This is opposite of Lerp for some reason. It goes from 1 to 0 instead of 0 to 1.
+ // Leaving it as a gift for future generations to deal with.
+ Quat Nlerp(const Quat& other, T a) const {
+ T sign = (Dot(other) >= 0.0f) ? 1.0f : -1.0f;
+ return (*this * sign * a + other * (1 - a)).Normalized();
+ }
+ // MERGE_MOBILE_SDK
+
+ // Rotate transforms vector in a manner that matches Matrix rotations (counter-clockwise,
+ // assuming negative direction of the axis). Standard formula: q(t) * V * q(t)^-1.
+ Vector3<T> Rotate(const Vector3<T>& v) const {
+ OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug
+
+ // rv = q * (v,0) * q'
+ // Same as rv = v + real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+ // uv = 2 * Imag().Cross(v);
+ T uvx = T(2) * (y * v.z - z * v.y);
+ T uvy = T(2) * (z * v.x - x * v.z);
+ T uvz = T(2) * (x * v.y - y * v.x);
+
+ // return v + Real()*uv + Imag().Cross(uv);
+ return Vector3<T>(
+ v.x + w * uvx + y * uvz - z * uvy,
+ v.y + w * uvy + z * uvx - x * uvz,
+ v.z + w * uvz + x * uvy - y * uvx);
+ }
+
+ // Rotation by inverse of *this
+ Vector3<T> InverseRotate(const Vector3<T>& v) const {
+ OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug
+
+ // rv = q' * (v,0) * q
+ // Same as rv = v + real * cross(-imag,v)*2 + cross(-imag, cross(-imag,v)*2);
+ // or rv = v - real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+ // uv = 2 * Imag().Cross(v);
+ T uvx = T(2) * (y * v.z - z * v.y);
+ T uvy = T(2) * (z * v.x - x * v.z);
+ T uvz = T(2) * (x * v.y - y * v.x);
+
+ // return v - Real()*uv + Imag().Cross(uv);
+ return Vector3<T>(
+ v.x - w * uvx + y * uvz - z * uvy,
+ v.y - w * uvy + z * uvx - x * uvz,
+ v.z - w * uvz + x * uvy - y * uvx);
+ }
+\r
+ // Inversed quaternion rotates in the opposite direction.
+ // NOTE: this is the conjugate, which equals the true inverse only for unit quaternions.
+ Quat Inverted() const {
+ return Quat(-x, -y, -z, w);
+ }
+
+ // Synonym of Inverted(), kept for API compatibility.
+ Quat Inverse() const {
+ return Quat(-x, -y, -z, w);
+ }
+
+ // Sets this quaternion to the one rotates in the opposite direction.
+ void Invert() {
+ *this = Quat(-x, -y, -z, w);
+ }
+
+ // Time integration of constant angular velocity over dt
+ Quat TimeIntegrate(const Vector3<T>& angularVelocity, T dt) const {
+ // solution is: this * exp( omega*dt/2 ); FromRotationVector(v) gives exp(v*.5).
+ return (*this * FastFromRotationVector(angularVelocity * dt, false)).Normalized();
+ }
+
+ // Time integration of constant angular acceleration and velocity over dt
+ // These are the first two terms of the "Magnus expansion" of the solution
+ //
+ // o = o * exp( W=(W1 + W2 + W3+...) * 0.5 );
+ //
+ // omega1 = (omega + omegaDot*dt)
+ // W1 = (omega + omega1)*dt/2
+ // W2 = cross(omega, omega1)/12*dt^2 % (= -cross(omega_dot, omega)/12*dt^3)
+ // Terms 3 and beyond are vanishingly small:
+ // W3 = cross(omega_dot, cross(omega_dot, omega))/240*dt^5
+ //
+ Quat TimeIntegrate(const Vector3<T>& angularVelocity, const Vector3<T>& angularAcceleration, T dt)
+ const {
+ const Vector3<T>& omega = angularVelocity;
+ const Vector3<T>& omegaDot = angularAcceleration;
+
+ Vector3<T> omega1 = (omega + omegaDot * dt);
+ // W = W1 + W2; the (dt/6)*(dt/2) factor yields the dt^2/12 of W2 above.
+ Vector3<T> W = ((omega + omega1) + omega.Cross(omega1) * (dt / T(6))) * (dt / T(2));
+
+ // FromRotationVector(v) is exp(v*.5)
+ return (*this * FastFromRotationVector(W, false)).Normalized();
+ }
+\r
+ // Decompose rotation into three rotations:
+ // roll radians about Z axis, then pitch radians about X axis, then yaw radians about Y axis.
+ // Call with nullptr if a return value is not needed.
+ void GetYawPitchRoll(T* yaw, T* pitch, T* roll) const {
+ return GetEulerAngles<Axis_Y, Axis_X, Axis_Z, Rotate_CCW, Handed_R>(yaw, pitch, roll);
+ }
+
+ // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of
+ // axis rotations and the specified coordinate system. Right-handed coordinate system
+ // is the default, with CCW rotations while looking in the negative axis direction.
+ // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+ // Rotation order is c, b, a:
+ // rotation c around axis A3
+ // is followed by rotation b around axis A2
+ // is followed by rotation a around axis A1
+ // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+ //
+ template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+ void GetEulerAngles(T* a, T* b, T* c) const {
+ OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug
+ OVR_MATH_STATIC_ASSERT(
+ (A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+ T Q[3] = {x, y, z}; // Quaternion components x,y,z
+
+ T ww = w * w;
+ T Q11 = Q[A1] * Q[A1];
+ T Q22 = Q[A2] * Q[A2];
+ T Q33 = Q[A3] * Q[A3];
+
+ T psign = T(-1);
+ // Determine whether even permutation
+ if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3))
+ psign = T(1);
+
+ // s2 is the sine of the middle angle b.
+ T s2 = psign * T(2) * (psign * w * Q[A2] + Q[A1] * Q[A3]);
+
+ // Near the poles (gimbal lock) angles a and c are degenerate; by convention
+ // a is set to 0 and the remaining rotation is folded into c.
+ T singularityRadius = Math<T>::SingularityRadius();
+ if (s2 < T(-1) + singularityRadius) { // South pole singularity
+ if (a)
+ *a = T(0);
+ if (b)
+ *b = -S * D * ((T)MATH_DOUBLE_PIOVER2);
+ if (c)
+ *c = S * D * atan2(T(2) * (psign * Q[A1] * Q[A2] + w * Q[A3]), ww + Q22 - Q11 - Q33);
+ } else if (s2 > T(1) - singularityRadius) { // North pole singularity
+ if (a)
+ *a = T(0);
+ if (b)
+ *b = S * D * ((T)MATH_DOUBLE_PIOVER2);
+ if (c)
+ *c = S * D * atan2(T(2) * (psign * Q[A1] * Q[A2] + w * Q[A3]), ww + Q22 - Q11 - Q33);
+ } else {
+ if (a)
+ *a = -S * D * atan2(T(-2) * (w * Q[A1] - psign * Q[A2] * Q[A3]), ww + Q33 - Q11 - Q22);
+ if (b)
+ *b = S * D * asin(s2);
+ if (c)
+ *c = S * D * atan2(T(2) * (w * Q[A3] - psign * Q[A1] * Q[A2]), ww + Q11 - Q22 - Q33);
+ }
+ }
+
+ // Convenience overload: right-handed system with the given rotate direction.
+ template <Axis A1, Axis A2, Axis A3, RotateDirection D>
+ void GetEulerAngles(T* a, T* b, T* c) const {
+ GetEulerAngles<A1, A2, A3, D, Handed_R>(a, b, c);
+ }
+
+ // Convenience overload: CCW rotations in a right-handed system.
+ template <Axis A1, Axis A2, Axis A3>
+ void GetEulerAngles(T* a, T* b, T* c) const {
+ GetEulerAngles<A1, A2, A3, Rotate_CCW, Handed_R>(a, b, c);
+ }
+\r
+ // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of
+ // axis rotations and the specified coordinate system. Right-handed coordinate system
+ // is the default, with CCW rotations while looking in the negative axis direction.
+ // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+ // rotation a around axis A1
+ // is followed by rotation b around axis A2
+ // is followed by rotation c around axis A1
+ // Rotations are CCW or CW (D) in LH or RH coordinate system (S)
+ template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
+ void GetEulerAnglesABA(T* a, T* b, T* c) const {
+ OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug
+ OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");
+
+ T Q[3] = {x, y, z}; // Quaternion components
+
+ // Determine the missing axis that was not supplied
+ int m = 3 - A1 - A2;
+
+ T ww = w * w;
+ T Q11 = Q[A1] * Q[A1];
+ T Q22 = Q[A2] * Q[A2];
+ T Qmm = Q[m] * Q[m];
+
+ T psign = T(-1);
+ if ((A1 + 1) % 3 == A2) // Determine whether even permutation
+ {
+ psign = T(1);
+ }
+
+ // c2 is the cosine of the middle angle b.
+ T c2 = ww + Q11 - Q22 - Qmm;
+ // Near the poles a and c are degenerate; a is set to 0 by convention.
+ T singularityRadius = Math<T>::SingularityRadius();
+ if (c2 < T(-1) + singularityRadius) { // South pole singularity
+ if (a)
+ *a = T(0);
+ if (b)
+ *b = S * D * ((T)MATH_DOUBLE_PI);
+ if (c)
+ *c = S * D * atan2(T(2) * (w * Q[A1] - psign * Q[A2] * Q[m]), ww + Q22 - Q11 - Qmm);
+ } else if (c2 > T(1) - singularityRadius) { // North pole singularity
+ if (a)
+ *a = T(0);
+ if (b)
+ *b = T(0);
+ if (c)
+ *c = S * D * atan2(T(2) * (w * Q[A1] - psign * Q[A2] * Q[m]), ww + Q22 - Q11 - Qmm);
+ } else {
+ if (a)
+ *a = S * D * atan2(psign * w * Q[m] + Q[A1] * Q[A2], w * Q[A2] - psign * Q[A1] * Q[m]);
+ if (b)
+ *b = S * D * acos(c2);
+ if (c)
+ *c = S * D * atan2(-psign * w * Q[m] + Q[A1] * Q[A2], w * Q[A2] + psign * Q[A1] * Q[m]);
+ }
+ }
+
+ // NaN/Inf checks: summing the components propagates any non-finite value,
+ // so a single isfinite() call covers all four.
+ bool IsNan() const {
+ return !isfinite(x + y + z + w);
+ }
+ bool IsFinite() const {
+ return isfinite(x + y + z + w);
+ }
+};\r
+\r
+typedef Quat<float> Quatf;\r
+typedef Quat<double> Quatd;\r
+\r
+OVR_MATH_STATIC_ASSERT((sizeof(Quatf) == 4 * sizeof(float)), "sizeof(Quatf) failure");\r
+OVR_MATH_STATIC_ASSERT((sizeof(Quatd) == 4 * sizeof(double)), "sizeof(Quatd) failure");\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Pose\r
+//\r
+// Position and orientation combined.\r
+//\r
+// This structure needs to be the same size and layout on 32-bit and 64-bit arch.\r
+// Update OVR_PadCheck.cpp when updating this object.\r
+template <class T>
+class Pose {
+ public:
+ typedef typename CompatibleTypes<Pose<T>>::Type CompatibleType;
+
+ // Default-constructed Pose is the identity (Quat/Vector3 defaults).
+ Pose() {}
+ Pose(const Quat<T>& orientation, const Vector3<T>& pos)
+ : Rotation(orientation), Translation(pos) {}
+ Pose(const Pose& s) : Rotation(s.Rotation), Translation(s.Translation) {}
+ Pose(const Matrix3<T>& R, const Vector3<T>& t) : Rotation((Quat<T>)R), Translation(t) {}
+ Pose(const CompatibleType& s) : Rotation(s.Orientation), Translation(s.Position) {}
+
+ // Conversion between float/double precision variants.
+ explicit Pose(const Pose<typename Math<T>::OtherFloatType>& s)
+ : Rotation(s.Rotation), Translation(s.Translation) {
+ // Ensure normalized rotation if converting from float to double
+ if (sizeof(T) > sizeof(typename Math<T>::OtherFloatType))
+ Rotation.Normalize();
+ }
+
+ static Pose Identity() {
+ return Pose(Quat<T>(0, 0, 0, 1), Vector3<T>(0, 0, 0));
+ }
+
+ void SetIdentity() {
+ Rotation = Quat<T>(0, 0, 0, 1);
+ Translation = Vector3<T>(0, 0, 0);
+ }
+
+ // used to make things obviously broken if someone tries to use the value
+ void SetInvalid() {
+ Rotation = Quat<T>(NAN, NAN, NAN, NAN);
+ Translation = Vector3<T>(NAN, NAN, NAN);
+ }
+
+ // Tolerant comparison of both translation and rotation (see Quat::IsEqual).
+ bool IsEqual(const Pose& b, T tolerance = Math<T>::Tolerance()) const {
+ return Translation.IsEqual(b.Translation, tolerance) && Rotation.IsEqual(b.Rotation, tolerance);
+ }
+
+ bool IsEqualMatchHemisphere(const Pose& b, T tolerance = Math<T>::Tolerance()) const {
+ return Translation.IsEqual(b.Translation, tolerance) &&
+ Rotation.IsEqualMatchHemisphere(b.Rotation, tolerance);
+ }
+
+ // C-interop: copy into the compatible C struct type.
+ operator typename CompatibleTypes<Pose<T>>::Type() const {
+ typename CompatibleTypes<Pose<T>>::Type result;
+ result.Orientation = Rotation;
+ result.Position = Translation;
+ return result;
+ }
+
+ Quat<T> Rotation;
+ Vector3<T> Translation;
+
+ OVR_MATH_STATIC_ASSERT(
+ (sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float)),
+ "(sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float))");
+
+ // Serialize as 7 scalars: quaternion x,y,z,w followed by translation x,y,z.
+ void ToArray(T* arr) const {
+ T temp[7] = {Rotation.x,
+ Rotation.y,
+ Rotation.z,
+ Rotation.w,
+ Translation.x,
+ Translation.y,
+ Translation.z};
+ for (int i = 0; i < 7; i++)
+ arr[i] = temp[i];
+ }
+
+ // Inverse of ToArray(); expects the same 7-scalar layout.
+ static Pose<T> FromArray(const T* v) {
+ Quat<T> rotation(v[0], v[1], v[2], v[3]);
+ Vector3<T> translation(v[4], v[5], v[6]);
+ // Ensure rotation is normalized, in case it was originally a float, stored in a .json file,
+ // etc.
+ return Pose<T>(rotation.Normalized(), translation);
+ }
+
+ Vector3<T> Rotate(const Vector3<T>& v) const {
+ return Rotation.Rotate(v);
+ }
+
+ Vector3<T> InverseRotate(const Vector3<T>& v) const {
+ return Rotation.InverseRotate(v);
+ }
+
+ Vector3<T> Translate(const Vector3<T>& v) const {
+ return v + Translation;
+ }
+
+ // Full rigid transform: rotate, then translate.
+ Vector3<T> Transform(const Vector3<T>& v) const {
+ return Rotate(v) + Translation;
+ }
+
+ Vector3<T> InverseTransform(const Vector3<T>& v) const {
+ return InverseRotate(v - Translation);
+ }
+
+ // Normals/directions are rotated only (no translation).
+ Vector3<T> TransformNormal(const Vector3<T>& v) const {
+ return Rotate(v);
+ }
+
+ Vector3<T> InverseTransformNormal(const Vector3<T>& v) const {
+ return InverseRotate(v);
+ }
+
+ Vector3<T> Apply(const Vector3<T>& v) const {
+ return Transform(v);
+ }
+
+ // Compose transforms; the right-hand pose is applied first:
+ // (a * b).Transform(v) == a.Transform(b.Transform(v)).
+ Pose operator*(const Pose& other) const {
+ return Pose(Rotation * other.Rotation, Apply(other.Translation));
+ }
+
+ // Inverse rigid transform: (p * p.Inverted()) is the identity pose.
+ Pose Inverted() const {
+ Quat<T> inv = Rotation.Inverted();
+ return Pose(inv, inv.Rotate(-Translation));
+ }
+
+ // Interpolation between two poses: translation is interpolated with Lerp(),
+ // and rotations are interpolated with Slerp().
+ Pose Lerp(const Pose& b, T s) const {
+ return Pose(Rotation.Slerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+ }
+
+ // Similar to Lerp above, except faster in case of small rotation differences. See
+ // Quat<T>::FastSlerp.
+ Pose FastLerp(const Pose& b, T s) const {
+ return Pose(Rotation.FastSlerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+ }
+
+ // First-order time integration of constant velocities over dt.
+ Pose TimeIntegrate(const Vector3<T>& linearVelocity, const Vector3<T>& angularVelocity, T dt)
+ const {
+ return Pose(
+ (Rotation * Quat<T>::FastFromRotationVector(angularVelocity * dt, false)).Normalized(),
+ Translation + linearVelocity * dt);
+ }
+
+ // Second-order time integration including constant accelerations over dt.
+ Pose TimeIntegrate(
+ const Vector3<T>& linearVelocity,
+ const Vector3<T>& linearAcceleration,
+ const Vector3<T>& angularVelocity,
+ const Vector3<T>& angularAcceleration,
+ T dt) const {
+ return Pose(
+ Rotation.TimeIntegrate(angularVelocity, angularAcceleration, dt),
+ Translation + linearVelocity * dt + linearAcceleration * dt * dt * T(0.5));
+ }
+
+ // Only the rotation needs normalization; translation is left untouched.
+ Pose Normalized() const {
+ return Pose(Rotation.Normalized(), Translation);
+ }
+ void Normalize() {
+ Rotation.Normalize();
+ }
+
+ bool IsNan() const {
+ return Translation.IsNan() || Rotation.IsNan();
+ }
+ bool IsFinite() const {
+ return Translation.IsFinite() && Rotation.IsFinite();
+ }
+};
+\r
+typedef Pose<float> Posef;\r
+typedef Pose<double> Posed;\r
+\r
+OVR_MATH_STATIC_ASSERT(\r
+ (sizeof(Posed) == sizeof(Quatd) + sizeof(Vector3d)),\r
+ "sizeof(Posed) failure");\r
+OVR_MATH_STATIC_ASSERT(\r
+ (sizeof(Posef) == sizeof(Quatf) + sizeof(Vector3f)),\r
+ "sizeof(Posef) failure");\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Matrix4\r
+//\r
+// Matrix4 is a 4x4 matrix used for 3d transformations and projections.\r
+// Translation stored in the last column.\r
+// The matrix is stored in row-major order in memory, meaning that values\r
+// of the first row are stored before the next one.\r
+//\r
+// The arrangement of the matrix is chosen to be in Right-Handed\r
+// coordinate system and counterclockwise rotations when looking down\r
+// the axis\r
+//\r
+// Transformation Order:\r
+// - Transformations are applied from right to left, so the expression\r
+// M1 * M2 * M3 * V means that the vector V is transformed by M3 first,\r
+// followed by M2 and M1.\r
+//\r
+// Coordinate system: Right Handed\r
+//\r
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.\r
+//\r
+// | sx 01 02 tx | // First column (sx, 10, 20): Axis X basis vector.\r
+// | 10 sy 12 ty | // Second column (01, sy, 21): Axis Y basis vector.\r
+// | 20 21 sz tz | // Third columnt (02, 12, sz): Axis Z basis vector.\r
+// | 30 31 32 33 |\r
+//\r
+// The basis vectors are first three columns.\r
+\r
+template <class T>\r
+class Matrix4 {\r
+ public:\r
+ typedef T ElementType;\r
+ static const size_t Dimension = 4;\r
+\r
+ T M[4][4];\r
+\r
+ enum NoInitType { NoInit };\r
+\r
+ // Construct with no memory initialization.
+ Matrix4(NoInitType) {}
+
+ // By default, we construct identity matrix.
+ Matrix4() {
+ M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+ M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+ M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+ M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+ }
+
+ // Element-wise constructor, row-major: m11..m14 fill row 0, and so on.
+ Matrix4(
+ T m11,
+ T m12,
+ T m13,
+ T m14,
+ T m21,
+ T m22,
+ T m23,
+ T m24,
+ T m31,
+ T m32,
+ T m33,
+ T m34,
+ T m41,
+ T m42,
+ T m43,
+ T m44) {
+ M[0][0] = m11;
+ M[0][1] = m12;
+ M[0][2] = m13;
+ M[0][3] = m14;
+ M[1][0] = m21;
+ M[1][1] = m22;
+ M[1][2] = m23;
+ M[1][3] = m24;
+ M[2][0] = m31;
+ M[2][1] = m32;
+ M[2][2] = m33;
+ M[2][3] = m34;
+ M[3][0] = m41;
+ M[3][1] = m42;
+ M[3][2] = m43;
+ M[3][3] = m44;
+ }
+
+ // 3x3 upper-left block constructor: translation column zero, bottom row (0,0,0,1).
+ Matrix4(T m11, T m12, T m13, T m21, T m22, T m23, T m31, T m32, T m33) {
+ M[0][0] = m11;
+ M[0][1] = m12;
+ M[0][2] = m13;
+ M[0][3] = T(0);
+ M[1][0] = m21;
+ M[1][1] = m22;
+ M[1][2] = m23;
+ M[1][3] = T(0);
+ M[2][0] = m31;
+ M[2][1] = m32;
+ M[2][2] = m33;
+ M[2][3] = T(0);
+ M[3][0] = T(0);
+ M[3][1] = T(0);
+ M[3][2] = T(0);
+ M[3][3] = T(1);
+ }
+\r
+ // Embed a 3x3 matrix in the upper-left block; translation zero, bottom row (0,0,0,1).
+ explicit Matrix4(const Matrix3<T>& m) {
+ M[0][0] = m.M[0][0];
+ M[0][1] = m.M[0][1];
+ M[0][2] = m.M[0][2];
+ M[0][3] = T(0);
+ M[1][0] = m.M[1][0];
+ M[1][1] = m.M[1][1];
+ M[1][2] = m.M[1][2];
+ M[1][3] = T(0);
+ M[2][0] = m.M[2][0];
+ M[2][1] = m.M[2][1];
+ M[2][2] = m.M[2][2];
+ M[2][3] = T(0);
+ M[3][0] = T(0);
+ M[3][1] = T(0);
+ M[3][2] = T(0);
+ M[3][3] = T(1);
+ }
+
+ // Rotation matrix from a unit quaternion (standard quaternion-to-matrix formula).
+ explicit Matrix4(const Quat<T>& q) {
+ OVR_MATH_ASSERT(q.IsNormalized()); // If this fires, caller has a quat math bug
+ T ww = q.w * q.w;
+ T xx = q.x * q.x;
+ T yy = q.y * q.y;
+ T zz = q.z * q.z;
+
+ M[0][0] = ww + xx - yy - zz;
+ M[0][1] = 2 * (q.x * q.y - q.w * q.z);
+ M[0][2] = 2 * (q.x * q.z + q.w * q.y);
+ M[0][3] = T(0);
+ M[1][0] = 2 * (q.x * q.y + q.w * q.z);
+ M[1][1] = ww - xx + yy - zz;
+ M[1][2] = 2 * (q.y * q.z - q.w * q.x);
+ M[1][3] = T(0);
+ M[2][0] = 2 * (q.x * q.z - q.w * q.y);
+ M[2][1] = 2 * (q.y * q.z + q.w * q.x);
+ M[2][2] = ww - xx - yy + zz;
+ M[2][3] = T(0);
+ M[3][0] = T(0);
+ M[3][1] = T(0);
+ M[3][2] = T(0);
+ M[3][3] = T(1);
+ }
+
+ // Rigid transform: rotation from p.Rotation, translation in the last column.
+ explicit Matrix4(const Pose<T>& p) {
+ Matrix4 result(p.Rotation);
+ result.SetTranslation(p.Translation);
+ *this = result;
+ }
+
+ // C-interop support
+ // Precision conversion (float <-> double), element by element.
+ explicit Matrix4(const Matrix4<typename Math<T>::OtherFloatType>& src) {
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ M[i][j] = (T)src.M[i][j];
+ }
+
+ // C-interop support.
+ Matrix4(const typename CompatibleTypes<Matrix4<T>>::Type& s) {
+ OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix4), "sizeof(s) == sizeof(Matrix4)");
+ memcpy(M, s.M, sizeof(M));
+ }
+
+ operator typename CompatibleTypes<Matrix4<T>>::Type() const {
+ typename CompatibleTypes<Matrix4<T>>::Type result;
+ OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix4), "sizeof(result) == sizeof(Matrix4)");
+ memcpy(result.M, M, sizeof(M));
+ return result;
+ }
+\r
+ // Writes all 16 elements, row-major, as "%g "-separated text into dest.
+ // Stops early if the buffer fills: if OVRMath_sprintf (snprintf-style) ever
+ // reports a would-be length that pushes pos past destsize, the unsigned
+ // expression (destsize - pos) would wrap around to a huge size_t on the
+ // next call, so guard before each write.
+ void ToString(char* dest, size_t destsize) const {
+ size_t pos = 0;
+ for (int r = 0; r < 4; r++) {
+ for (int c = 0; c < 4; c++) {
+ if (pos >= destsize)
+ return; // buffer exhausted; avoid size_t underflow in destsize - pos
+ pos += OVRMath_sprintf(dest + pos, destsize - pos, "%g ", M[r][c]);
+ }
+ }
+ }
+\r
+ // Parse 16 space-separated numbers (the format written by ToString).
+ // NOTE(review): atof reports no parse errors, so malformed tokens silently
+ // become 0 - presumably only ToString output is fed here; confirm callers.
+ static Matrix4 FromString(const char* src) {
+ Matrix4 result;
+ if (src) {
+ for (int r = 0; r < 4; r++) {
+ for (int c = 0; c < 4; c++) {
+ result.M[r][c] = (T)atof(src);
+ // Skip the token just parsed...
+ while (*src && *src != ' ') {
+ src++;
+ }
+ // ...then the separating spaces.
+ while (*src && *src == ' ') {
+ src++;
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ static Matrix4 Identity() {
+ return Matrix4();
+ }
+
+ void SetIdentity() {
+ M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+ M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+ M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+ M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+ }
+
+ // Basis accessors: the X/Y/Z basis vectors are the first three columns.
+ void SetXBasis(const Vector3<T>& v) {
+ M[0][0] = v.x;
+ M[1][0] = v.y;
+ M[2][0] = v.z;
+ }
+ Vector3<T> GetXBasis() const {
+ return Vector3<T>(M[0][0], M[1][0], M[2][0]);
+ }
+
+ void SetYBasis(const Vector3<T>& v) {
+ M[0][1] = v.x;
+ M[1][1] = v.y;
+ M[2][1] = v.z;
+ }
+ Vector3<T> GetYBasis() const {
+ return Vector3<T>(M[0][1], M[1][1], M[2][1]);
+ }
+
+ void SetZBasis(const Vector3<T>& v) {
+ M[0][2] = v.x;
+ M[1][2] = v.y;
+ M[2][2] = v.z;
+ }
+ Vector3<T> GetZBasis() const {
+ return Vector3<T>(M[0][2], M[1][2], M[2][2]);
+ }
+\r
+ // Exact element-wise floating-point equality (no tolerance).
+ bool operator==(const Matrix4& b) const {
+ bool isEqual = true;
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ isEqual &= (M[i][j] == b.M[i][j]);
+
+ return isEqual;
+ }
+
+ Matrix4 operator+(const Matrix4& b) const {
+ Matrix4 result(*this);
+ result += b;
+ return result;
+ }
+
+ Matrix4& operator+=(const Matrix4& b) {
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ M[i][j] += b.M[i][j];
+ return *this;
+ }
+
+ Matrix4 operator-(const Matrix4& b) const {
+ Matrix4 result(*this);
+ result -= b;
+ return result;
+ }
+
+ Matrix4& operator-=(const Matrix4& b) {
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ M[i][j] -= b.M[i][j];
+ return *this;
+ }
+
+ // Multiplies two matrices into destination with minimum copying.
+ // d must not alias a or b (asserted), since rows of d are written in place.
+ static Matrix4& Multiply(Matrix4* d, const Matrix4& a, const Matrix4& b) {
+ OVR_MATH_ASSERT((d != &a) && (d != &b));
+ int i = 0;
+ do {
+ d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0] +
+ a.M[i][3] * b.M[3][0];
+ d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1] +
+ a.M[i][3] * b.M[3][1];
+ d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2] +
+ a.M[i][3] * b.M[3][2];
+ d->M[i][3] = a.M[i][0] * b.M[0][3] + a.M[i][1] * b.M[1][3] + a.M[i][2] * b.M[2][3] +
+ a.M[i][3] * b.M[3][3];
+ } while ((++i) < 4);
+
+ return *d;
+ }
+
+ Matrix4 operator*(const Matrix4& b) const {
+ Matrix4 result(Matrix4::NoInit);
+ Multiply(&result, *this, b);
+ return result;
+ }
+
+ // Copies *this first so Multiply's no-alias precondition holds.
+ Matrix4& operator*=(const Matrix4& b) {
+ return Multiply(this, Matrix4(*this), b);
+ }
+\r
+ // Scalar scale of every element.
+ Matrix4 operator*(T s) const {
+ Matrix4 result(*this);
+ result *= s;
+ return result;
+ }
+
+ Matrix4& operator*=(T s) {
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ M[i][j] *= s;
+ return *this;
+ }
+
+ Matrix4 operator/(T s) const {
+ Matrix4 result(*this);
+ result /= s;
+ return result;
+ }
+
+ Matrix4& operator/=(T s) {
+ for (int i = 0; i < 4; i++)
+ for (int j = 0; j < 4; j++)
+ M[i][j] /= s;
+ return *this;
+ }
+
+ // Element access: row i, column j.
+ T operator()(int i, int j) const {
+ return M[i][j];
+ }
+ T& operator()(int i, int j) {
+ return M[i][j];
+ }
+
+ Vector4<T> operator*(const Vector4<T>& b) const {
+ return Transform(b);
+ }
+
+ // Transform a 3D point with w assumed 1, including the perspective divide.
+ // NOTE(review): divides by the computed w; a point on the projection plane
+ // (w == 0) produces a division by zero - callers are expected to avoid that.
+ Vector3<T> Transform(const Vector3<T>& v) const {
+ const T rcpW = T(1) / (M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3]);
+ return Vector3<T>(
+ (M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3]) * rcpW,
+ (M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3]) * rcpW,
+ (M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]) * rcpW);
+ }
+
+ // Full homogeneous transform; no divide, w is carried through.
+ Vector4<T> Transform(const Vector4<T>& v) const {
+ return Vector4<T>(
+ M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3] * v.w,
+ M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3] * v.w,
+ M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3] * v.w,
+ M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3] * v.w);
+ }
+
+ Matrix4 Transposed() const {
+ return Matrix4(
+ M[0][0],
+ M[1][0],
+ M[2][0],
+ M[3][0],
+ M[0][1],
+ M[1][1],
+ M[2][1],
+ M[3][1],
+ M[0][2],
+ M[1][2],
+ M[2][2],
+ M[3][2],
+ M[0][3],
+ M[1][3],
+ M[2][3],
+ M[3][3]);
+ }
+
+ void Transpose() {
+ *this = Transposed();
+ }
+\r
+ // Determinant of the 3x3 submatrix selected by the given row/column index triples.
+ T SubDet(const size_t* rows, const size_t* cols) const {
+ return M[rows[0]][cols[0]] *
+ (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) -
+ M[rows[0]][cols[1]] *
+ (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) +
+ M[rows[0]][cols[2]] *
+ (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
+ }
+
+ // Cofactor C(I,J): the minor with row I / column J removed, with checkerboard sign.
+ T Cofactor(size_t I, size_t J) const {
+ const size_t indices[4][3] = {{1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2}};
+ return ((I + J) & 1) ? -SubDet(indices[I], indices[J]) : SubDet(indices[I], indices[J]);
+ }
+
+ // Laplace expansion along the first row.
+ T Determinant() const {
+ return M[0][0] * Cofactor(0, 0) + M[0][1] * Cofactor(0, 1) + M[0][2] * Cofactor(0, 2) +
+ M[0][3] * Cofactor(0, 3);
+ }
+
+ // Adjugate: transpose of the cofactor matrix (note the swapped indices).
+ Matrix4 Adjugated() const {
+ return Matrix4(
+ Cofactor(0, 0),
+ Cofactor(1, 0),
+ Cofactor(2, 0),
+ Cofactor(3, 0),
+ Cofactor(0, 1),
+ Cofactor(1, 1),
+ Cofactor(2, 1),
+ Cofactor(3, 1),
+ Cofactor(0, 2),
+ Cofactor(1, 2),
+ Cofactor(2, 2),
+ Cofactor(3, 2),
+ Cofactor(0, 3),
+ Cofactor(1, 3),
+ Cofactor(2, 3),
+ Cofactor(3, 3));
+ }
+
+ // General inverse via adjugate / determinant; asserts on singular matrices.
+ Matrix4 Inverted() const {
+ T det = Determinant();
+ OVR_MATH_ASSERT(det != 0);
+ return Adjugated() * (T(1) / det);
+ }
+
+ void Invert() {
+ *this = Inverted();
+ }
+
+ // This is more efficient than general inverse, but ONLY works
+ // correctly if it is a homogeneous transform matrix (rot + trans)
+ Matrix4 InvertedHomogeneousTransform() const {
+ // Make the inverse rotation matrix
+ Matrix4 rinv = this->Transposed();
+ rinv.M[3][0] = rinv.M[3][1] = rinv.M[3][2] = T(0);
+ // Make the inverse translation matrix
+ Vector3<T> tvinv(-M[0][3], -M[1][3], -M[2][3]);
+ Matrix4 tinv = Matrix4::Translation(tvinv);
+ return rinv * tinv; // "untranslate", then "unrotate"
+ }
+
+ // This is more efficient than general inverse, but ONLY works
+ // correctly if it is a homogeneous transform matrix (rot + trans)
+ void InvertHomogeneousTransform() {
+ *this = InvertedHomogeneousTransform();
+ }
+\r
+ // Matrix to Euler Angles conversion
+ // a,b,c, are the YawPitchRoll angles to be returned
+ // rotation a around axis A1
+ // is followed by rotation b around axis A2
+ // is followed by rotation c around axis A3
+ // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+ template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+ void ToEulerAngles(T* a, T* b, T* c) const {
+ OVR_MATH_STATIC_ASSERT(
+ (A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+ T psign = T(-1);
+ if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) // Determine whether even permutation
+ psign = T(1);
+
+ // pm is the sine of the middle angle b; near +/-1 we are at gimbal lock.
+ T pm = psign * M[A1][A3];
+ T singularityRadius = Math<T>::SingularityRadius();
+ if (pm < T(-1) + singularityRadius) { // South pole singularity
+ *a = T(0);
+ *b = -S * D * ((T)MATH_DOUBLE_PIOVER2);
+ *c = S * D * atan2(psign * M[A2][A1], M[A2][A2]);
+ } else if (pm > T(1) - singularityRadius) { // North pole singularity
+ *a = T(0);
+ *b = S * D * ((T)MATH_DOUBLE_PIOVER2);
+ *c = S * D * atan2(psign * M[A2][A1], M[A2][A2]);
+ } else { // Normal case (nonsingular)
+ *a = S * D * atan2(-psign * M[A2][A3], M[A3][A3]);
+ *b = S * D * asin(pm);
+ *c = S * D * atan2(-psign * M[A1][A2], M[A1][A1]);
+ }
+ }
+\r
  // Matrix to Euler Angles conversion
  // a,b,c, are the YawPitchRoll angles to be returned
  // rotation a around axis A1
  // is followed by rotation b around axis A2
  // is followed by rotation c around axis A1  (ABA / proper Euler sequence)
  // rotations are CCW or CW (D) in LH or RH coordinate system (S)
  // A1 and A2 must differ (enforced at compile time).
  template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
  void ToEulerAnglesABA(T* a, T* b, T* c) const {
    OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");

    // Determine the axis that was not supplied (axis indices sum to 3).
    int m = 3 - A1 - A2;

    T psign = T(-1);
    if ((A1 + 1) % 3 == A2) // Determine whether even permutation
      psign = T(1);

    // c2 is the cosine of the middle angle 'b' (recovered via acos below).
    T c2 = M[A1][A1];
    T singularityRadius = Math<T>::SingularityRadius();
    if (c2 < T(-1) + singularityRadius) { // South pole singularity
      // Gimbal lock: 'a' and 'c' are not independent; conventionally report a = 0.
      *a = T(0);
      *b = S * D * ((T)MATH_DOUBLE_PI);
      *c = S * D * atan2(-psign * M[A2][m], M[A2][A2]);
    } else if (c2 > T(1) - singularityRadius) { // North pole singularity
      *a = T(0);
      *b = T(0);
      *c = S * D * atan2(-psign * M[A2][m], M[A2][A2]);
    } else { // Normal case (nonsingular)
      *a = S * D * atan2(M[A2][A1], -psign * M[m][A1]);
      *b = S * D * acos(c2);
      *c = S * D * atan2(M[A1][A2], psign * M[A1][m]);
    }
  }
+\r
  // Creates a matrix that converts the vertices from one coordinate system
  // to another.
  // NOTE(review): assumes WorldAxes members encode axes as signed values whose
  // abs() is a 1-based axis index (so inv[] needs 4 slots) — confirm against
  // the WorldAxes declaration.
  static Matrix4 AxisConversion(const WorldAxes& to, const WorldAxes& from) {
    // Holds axis values from the 'to' structure
    int toArray[3] = {to.XAxis, to.YAxis, to.ZAxis};

    // The inverse of the toArray
    int inv[4]; // slot 0 initialized alongside the X entry; axes fill the rest
    inv[0] = inv[abs(to.XAxis)] = 0;
    inv[abs(to.YAxis)] = 1;
    inv[abs(to.ZAxis)] = 2;

    // Start from an all-zero 3x3 block (9-argument constructor).
    Matrix4 m(0, 0, 0, 0, 0, 0, 0, 0, 0);

    // Only three values in the matrix need to be changed to 1 or -1.
    // The integer division yields the relative sign of the matching axes.
    m.M[inv[abs(from.XAxis)]][0] = T(from.XAxis / toArray[inv[abs(from.XAxis)]]);
    m.M[inv[abs(from.YAxis)]][1] = T(from.YAxis / toArray[inv[abs(from.YAxis)]]);
    m.M[inv[abs(from.ZAxis)]][2] = T(from.ZAxis / toArray[inv[abs(from.ZAxis)]]);
    return m;
  }
+\r
+ // Creates a matrix for translation by vector\r
+ static Matrix4 Translation(const Vector3<T>& v) {\r
+ Matrix4 t;\r
+ t.M[0][3] = v.x;\r
+ t.M[1][3] = v.y;\r
+ t.M[2][3] = v.z;\r
+ return t;\r
+ }\r
+\r
+ // Creates a matrix for translation by vector\r
+ static Matrix4 Translation(T x, T y, T z = T(0)) {\r
+ Matrix4 t;\r
+ t.M[0][3] = x;\r
+ t.M[1][3] = y;\r
+ t.M[2][3] = z;\r
+ return t;\r
+ }\r
+\r
+ // Sets the translation part\r
+ void SetTranslation(const Vector3<T>& v) {\r
+ M[0][3] = v.x;\r
+ M[1][3] = v.y;\r
+ M[2][3] = v.z;\r
+ }\r
+\r
  // Returns the translation part (last column of the top three rows).
  Vector3<T> GetTranslation() const {
    return Vector3<T>(M[0][3], M[1][3], M[2][3]);
  }
+\r
+ // Creates a matrix for scaling by vector\r
+ static Matrix4 Scaling(const Vector3<T>& v) {\r
+ Matrix4 t;\r
+ t.M[0][0] = v.x;\r
+ t.M[1][1] = v.y;\r
+ t.M[2][2] = v.z;\r
+ return t;\r
+ }\r
+\r
+ // Creates a matrix for scaling by vector\r
+ static Matrix4 Scaling(T x, T y, T z) {\r
+ Matrix4 t;\r
+ t.M[0][0] = x;\r
+ t.M[1][1] = y;\r
+ t.M[2][2] = z;\r
+ return t;\r
+ }\r
+\r
+ // Creates a matrix for scaling by constant\r
+ static Matrix4 Scaling(T s) {\r
+ Matrix4 t;\r
+ t.M[0][0] = s;\r
+ t.M[1][1] = s;\r
+ t.M[2][2] = s;\r
+ return t;\r
+ }\r
+\r
  // Simple L1 distance (sum of absolute element differences) over all 16
  // elements — i.e. the matrices are compared as points in R^16.
  T Distance(const Matrix4& m2) const {
    T d = fabs(M[0][0] - m2.M[0][0]) + fabs(M[0][1] - m2.M[0][1]);
    d += fabs(M[0][2] - m2.M[0][2]) + fabs(M[0][3] - m2.M[0][3]);
    d += fabs(M[1][0] - m2.M[1][0]) + fabs(M[1][1] - m2.M[1][1]);
    d += fabs(M[1][2] - m2.M[1][2]) + fabs(M[1][3] - m2.M[1][3]);
    d += fabs(M[2][0] - m2.M[2][0]) + fabs(M[2][1] - m2.M[2][1]);
    d += fabs(M[2][2] - m2.M[2][2]) + fabs(M[2][3] - m2.M[2][3]);
    d += fabs(M[3][0] - m2.M[3][0]) + fabs(M[3][1] - m2.M[3][1]);
    d += fabs(M[3][2] - m2.M[3][2]) + fabs(M[3][3] - m2.M[3][3]);
    return d;
  }
+\r
  // Creates a rotation matrix rotating around the given axis 'A' by 'angle'
  // radians, with rotate direction 'd' and handedness 's' folded into the
  // sine term.
  // Just for quick testing. Not for final API. Need to remove case.
  static Matrix4 RotationAxis(Axis A, T angle, RotateDirection d, HandedSystem s) {
    T sina = s * d * sin(angle);
    T cosa = cos(angle);

    switch (A) {
      case Axis_X:
        return Matrix4(1, 0, 0, 0, cosa, -sina, 0, sina, cosa);
      case Axis_Y:
        return Matrix4(cosa, 0, sina, 0, 1, 0, -sina, 0, cosa);
      case Axis_Z:
        return Matrix4(cosa, -sina, 0, sina, cosa, 0, 0, 0, 1);
      default:
        // Unknown axis value: return identity.
        return Matrix4();
    }
  }
+\r
+ // Creates a rotation matrix rotating around the X axis by 'angle' radians.\r
+ // Rotation direction is depends on the coordinate system:\r
+ // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),\r
+ // while looking in the negative axis direction. This is the\r
+ // same as looking down from positive axis values towards origin.\r
+ // LHS: Positive angle values rotate clock-wise (CW), while looking in the\r
+ // negative axis direction.\r
+ static Matrix4 RotationX(T angle) {\r
+ T sina = sin(angle);\r
+ T cosa = cos(angle);\r
+ return Matrix4(1, 0, 0, 0, cosa, -sina, 0, sina, cosa);\r
+ }\r
+\r
  // Creates a rotation matrix rotating around the Y axis by 'angle' radians.
  // Rotation direction depends on the coordinate system:
  // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
  //                       while looking in the negative axis direction. This is the
  //                       same as looking down from positive axis values towards origin.
  // LHS: Positive angle values rotate clock-wise (CW), while looking in the
  //      negative axis direction.
  static Matrix4 RotationY(T angle) {
    T sina = (T)sin(angle);
    T cosa = (T)cos(angle);
    return Matrix4(cosa, 0, sina, 0, 1, 0, -sina, 0, cosa);
  }
+\r
+ // Creates a rotation matrix rotating around the Z axis by 'angle' radians.\r
+ // Rotation direction is depends on the coordinate system:\r
+ // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),\r
+ // while looking in the negative axis direction. This is the\r
+ // same as looking down from positive axis values towards origin.\r
+ // LHS: Positive angle values rotate clock-wise (CW), while looking in the\r
+ // negative axis direction.\r
+ static Matrix4 RotationZ(T angle) {\r
+ T sina = sin(angle);\r
+ T cosa = cos(angle);\r
+ return Matrix4(cosa, -sina, 0, sina, cosa, 0, 0, 0, 1);\r
+ }\r
+\r
+ // LookAtRH creates a View transformation matrix for right-handed coordinate system.\r
+ // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'\r
+ // specifying the up vector. The resulting matrix should be used with PerspectiveRH\r
+ // projection.\r
+ static Matrix4 LookAtRH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up) {\r
+ Vector3<T> z = (eye - at).Normalized(); // Forward\r
+ Vector3<T> x = up.Cross(z).Normalized(); // Right\r
+ Vector3<T> y = z.Cross(x);\r
+\r
+ Matrix4 m(\r
+ x.x,\r
+ x.y,\r
+ x.z,\r
+ -(x.Dot(eye)),\r
+ y.x,\r
+ y.y,\r
+ y.z,\r
+ -(y.Dot(eye)),\r
+ z.x,\r
+ z.y,\r
+ z.z,\r
+ -(z.Dot(eye)),\r
+ 0,\r
+ 0,\r
+ 0,\r
+ 1);\r
+ return m;\r
+ }\r
+\r
+ // LookAtLH creates a View transformation matrix for left-handed coordinate system.\r
+ // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'\r
+ // specifying the up vector.\r
+ static Matrix4 LookAtLH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up) {\r
+ Vector3<T> z = (at - eye).Normalized(); // Forward\r
+ Vector3<T> x = up.Cross(z).Normalized(); // Right\r
+ Vector3<T> y = z.Cross(x);\r
+\r
+ Matrix4 m(\r
+ x.x,\r
+ x.y,\r
+ x.z,\r
+ -(x.Dot(eye)),\r
+ y.x,\r
+ y.y,\r
+ y.z,\r
+ -(y.Dot(eye)),\r
+ z.x,\r
+ z.y,\r
+ z.z,\r
+ -(z.Dot(eye)),\r
+ 0,\r
+ 0,\r
+ 0,\r
+ 1);\r
+ return m;\r
+ }\r
+\r
  // PerspectiveRH creates a right-handed perspective projection matrix that can be
  // used with the Oculus sample renderer.
  //  yfov   - Specifies vertical field of view in radians.
  //  aspect - Screen aspect ratio, which is usually width/height for square pixels.
  //           Note that xfov = yfov * aspect.
  //  znear  - Absolute value of near Z clipping range.
  //  zfar   - Absolute value of far Z clipping range (larger than near).
  // Even though RHS usually looks in the direction of negative Z, positive values
  // are expected for znear and zfar.
  static Matrix4 PerspectiveRH(T yfov, T aspect, T znear, T zfar) {
    Matrix4 m;
    T tanHalfFov = (T)tan(yfov * T(0.5));

    m.M[0][0] = T(1) / (aspect * tanHalfFov);
    m.M[1][1] = T(1) / tanHalfFov;
    m.M[2][2] = zfar / (znear - zfar); // maps depth to a (0 to w) clip range
    m.M[3][2] = T(-1); // w' = -z (perspective divide)
    m.M[2][3] = (zfar * znear) / (znear - zfar);
    m.M[3][3] = T(0);

    // Note: Post-projection matrix result assumes Left-Handed coordinate system,
    // with Y up, X right and Z forward. This supports positive z-buffer values.
    // This is the case even for RHS coordinate input.
    return m;
  }
+\r
+ // PerspectiveLH creates a left-handed perspective projection matrix that can be\r
+ // used with the Oculus sample renderer.\r
+ // yfov - Specifies vertical field of view in radians.\r
+ // aspect - Screen aspect ration, which is usually width/height for square pixels.\r
+ // Note that xfov = yfov * aspect.\r
+ // znear - Absolute value of near Z clipping clipping range.\r
+ // zfar - Absolute value of far Z clipping clipping range (larger then near).\r
+ static Matrix4 PerspectiveLH(T yfov, T aspect, T znear, T zfar) {\r
+ Matrix4 m;\r
+ T tanHalfFov = (T)tan(yfov * T(0.5));\r
+\r
+ m.M[0][0] = T(1) / (aspect * tanHalfFov);\r
+ m.M[1][1] = T(1) / tanHalfFov;\r
+ // m.M[2][2] = zfar / (znear - zfar);\r
+ m.M[2][2] = zfar / (zfar - znear);\r
+ m.M[3][2] = T(-1);\r
+ m.M[2][3] = (zfar * znear) / (znear - zfar);\r
+ m.M[3][3] = T(0);\r
+\r
+ // Note: Post-projection matrix result assumes Left-Handed coordinate system,\r
+ // with Y up, X right and Z forward. This supports positive z-buffer values.\r
+ // This is the case even for RHS coordinate input.\r
+ return m;\r
+ }\r
+\r
+ static Matrix4 Ortho2D(T w, T h) {\r
+ Matrix4 m;\r
+ m.M[0][0] = T(2.0) / w;\r
+ m.M[1][1] = T(-2.0) / h;\r
+ m.M[0][3] = T(-1.0);\r
+ m.M[1][3] = T(1.0);\r
+ m.M[2][2] = T(0);\r
+ return m;\r
+ }\r
+};\r
+\r
+typedef Matrix4<float> Matrix4f;\r
+typedef Matrix4<double> Matrix4d;\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Matrix3\r
+//\r
+// Matrix3 is a 3x3 matrix used for representing a rotation matrix.\r
+// The matrix is stored in row-major order in memory, meaning that values\r
+// of the first row are stored before the next one.\r
+//\r
+// The arrangement of the matrix is chosen to be in Right-Handed\r
+// coordinate system and counterclockwise rotations when looking down\r
+// the axis\r
+//\r
+// Transformation Order:\r
+// - Transformations are applied from right to left, so the expression\r
+// M1 * M2 * M3 * V means that the vector V is transformed by M3 first,\r
+// followed by M2 and M1.\r
+//\r
+// Coordinate system: Right Handed\r
+//\r
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.\r
+\r
template <class T>
class Matrix3 {
 public:
  typedef T ElementType;
  static const size_t Dimension = 3;

  // Row-major storage: M[row][col].
  T M[3][3];

  enum NoInitType { NoInit };

  // Construct with no memory initialization.
  Matrix3(NoInitType) {}

  // By default, we construct identity matrix.
  Matrix3() {
    M[0][0] = M[1][1] = M[2][2] = T(1);
    M[0][1] = M[1][0] = M[2][0] = T(0);
    M[0][2] = M[1][2] = M[2][1] = T(0);
  }

  // Element-wise construction, row by row.
  Matrix3(T m11, T m12, T m13, T m21, T m22, T m23, T m31, T m32, T m33) {
    M[0][0] = m11;
    M[0][1] = m12;
    M[0][2] = m13;
    M[1][0] = m21;
    M[1][1] = m22;
    M[1][2] = m23;
    M[2][0] = m31;
    M[2][1] = m32;
    M[2][2] = m33;
  }

  // Construction from X, Y, Z basis vectors (stored as columns).
  Matrix3(const Vector3<T>& xBasis, const Vector3<T>& yBasis, const Vector3<T>& zBasis) {
    M[0][0] = xBasis.x;
    M[0][1] = yBasis.x;
    M[0][2] = zBasis.x;
    M[1][0] = xBasis.y;
    M[1][1] = yBasis.y;
    M[1][2] = zBasis.y;
    M[2][0] = xBasis.z;
    M[2][1] = yBasis.z;
    M[2][2] = zBasis.z;
  }

  // Rotation matrix from a unit quaternion (standard quat-to-matrix expansion).
  explicit Matrix3(const Quat<T>& q) {
    OVR_MATH_ASSERT(q.IsNormalized()); // If this fires, caller has a quat math bug
    const T tx = q.x + q.x, ty = q.y + q.y, tz = q.z + q.z;
    const T twx = q.w * tx, twy = q.w * ty, twz = q.w * tz;
    const T txx = q.x * tx, txy = q.x * ty, txz = q.x * tz;
    const T tyy = q.y * ty, tyz = q.y * tz, tzz = q.z * tz;
    M[0][0] = T(1) - (tyy + tzz);
    M[0][1] = txy - twz;
    M[0][2] = txz + twy;
    M[1][0] = txy + twz;
    M[1][1] = T(1) - (txx + tzz);
    M[1][2] = tyz - twx;
    M[2][0] = txz - twy;
    M[2][1] = tyz + twx;
    M[2][2] = T(1) - (txx + tyy);
  }

  // Uniform-diagonal matrix: s on the diagonal, zeros elsewhere.
  inline explicit Matrix3(T s) {
    M[0][0] = M[1][1] = M[2][2] = s;
    M[0][1] = M[0][2] = M[1][0] = M[1][2] = M[2][0] = M[2][1] = T(0);
  }

  // Diagonal matrix from three values.
  Matrix3(T m11, T m22, T m33) {
    M[0][0] = m11;
    M[0][1] = T(0);
    M[0][2] = T(0);
    M[1][0] = T(0);
    M[1][1] = m22;
    M[1][2] = T(0);
    M[2][0] = T(0);
    M[2][1] = T(0);
    M[2][2] = m33;
  }

  // Conversion from the other float precision (float <-> double).
  explicit Matrix3(const Matrix3<typename Math<T>::OtherFloatType>& src) {
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        M[i][j] = (T)src.M[i][j];
  }

  // C-interop support.
  Matrix3(const typename CompatibleTypes<Matrix3<T>>::Type& s) {
    OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix3), "sizeof(s) == sizeof(Matrix3)");
    memcpy(M, s.M, sizeof(M));
  }

  operator const typename CompatibleTypes<Matrix3<T>>::Type() const {
    typename CompatibleTypes<Matrix3<T>>::Type result;
    OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix3), "sizeof(result) == sizeof(Matrix3)");
    memcpy(result.M, M, sizeof(M));
    return result;
  }

  // Element access: (row, col).
  T operator()(int i, int j) const {
    return M[i][j];
  }
  T& operator()(int i, int j) {
    return M[i][j];
  }

  // Writes all 9 elements space-separated ("%g") into dest.
  void ToString(char* dest, size_t destsize) const {
    size_t pos = 0;
    for (int r = 0; r < 3; r++) {
      for (int c = 0; c < 3; c++)
        pos += OVRMath_sprintf(dest + pos, destsize - pos, "%g ", M[r][c]);
    }
  }

  // Parses 9 space-separated values (the ToString format).
  // A null src yields an identity (default-constructed) matrix.
  static Matrix3 FromString(const char* src) {
    Matrix3 result;
    if (src) {
      for (int r = 0; r < 3; r++) {
        for (int c = 0; c < 3; c++) {
          result.M[r][c] = (T)atof(src);
          // Skip the token just parsed, then the separating spaces.
          while (*src && *src != ' ')
            src++;
          while (*src && *src == ' ')
            src++;
        }
      }
    }
    return result;
  }

  static Matrix3 Identity() {
    return Matrix3();
  }

  void SetIdentity() {
    M[0][0] = M[1][1] = M[2][2] = T(1);
    M[0][1] = M[1][0] = M[2][0] = T(0);
    M[0][2] = M[1][2] = M[2][1] = T(0);
  }

  static Matrix3 Diagonal(T m00, T m11, T m22) {
    return Matrix3(m00, 0, 0, 0, m11, 0, 0, 0, m22);
  }
  static Matrix3 Diagonal(const Vector3<T>& v) {
    return Diagonal(v.x, v.y, v.z);
  }

  // Sum of diagonal elements.
  T Trace() const {
    return M[0][0] + M[1][1] + M[2][2];
  }

  // Exact element-wise comparison (no epsilon tolerance).
  bool operator==(const Matrix3& b) const {
    bool isEqual = true;
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++)
        isEqual &= (M[i][j] == b.M[i][j]);
    }

    return isEqual;
  }

  Matrix3 operator+(const Matrix3& b) const {
    Matrix3<T> result(*this);
    result += b;
    return result;
  }

  Matrix3& operator+=(const Matrix3& b) {
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        M[i][j] += b.M[i][j];
    return *this;
  }

  void operator=(const Matrix3& b) {
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        M[i][j] = b.M[i][j];
  }

  Matrix3 operator-(const Matrix3& b) const {
    Matrix3 result(*this);
    result -= b;
    return result;
  }

  Matrix3& operator-=(const Matrix3& b) {
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++)
        M[i][j] -= b.M[i][j];
    }

    return *this;
  }

  // Multiplies two matrices into destination with minimum copying.
  // 'd' must not alias either operand (asserted).
  static Matrix3& Multiply(Matrix3* d, const Matrix3& a, const Matrix3& b) {
    OVR_MATH_ASSERT((d != &a) && (d != &b));
    int i = 0;
    do {
      d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0];
      d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1];
      d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2];
    } while ((++i) < 3);

    return *d;
  }

  Matrix3 operator*(const Matrix3& b) const {
    Matrix3 result(Matrix3::NoInit);
    Multiply(&result, *this, b);
    return result;
  }

  Matrix3& operator*=(const Matrix3& b) {
    // A temporary copy of *this is passed so Multiply's no-aliasing
    // precondition holds.
    return Multiply(this, Matrix3(*this), b);
  }

  Matrix3 operator*(T s) const {
    Matrix3 result(*this);
    result *= s;
    return result;
  }

  Matrix3& operator*=(T s) {
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++)
        M[i][j] *= s;
    }

    return *this;
  }

  // Matrix-vector product (v treated as a column vector).
  Vector3<T> operator*(const Vector3<T>& b) const {
    Vector3<T> result;
    result.x = M[0][0] * b.x + M[0][1] * b.y + M[0][2] * b.z;
    result.y = M[1][0] * b.x + M[1][1] * b.y + M[1][2] * b.z;
    result.z = M[2][0] * b.x + M[2][1] * b.y + M[2][2] * b.z;

    return result;
  }

  Matrix3 operator/(T s) const {
    Matrix3 result(*this);
    result /= s;
    return result;
  }

  Matrix3& operator/=(T s) {
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++)
        M[i][j] /= s;
    }

    return *this;
  }

  // Projective transform of a 2D point: treats v as (x, y, 1) and divides
  // by the resulting homogeneous coordinate.
  Vector2<T> Transform(const Vector2<T>& v) const {
    const T rcpZ = T(1) / (M[2][0] * v.x + M[2][1] * v.y + M[2][2]);
    return Vector2<T>(
        (M[0][0] * v.x + M[0][1] * v.y + M[0][2]) * rcpZ,
        (M[1][0] * v.x + M[1][1] * v.y + M[1][2]) * rcpZ);
  }

  // Linear transform of a 3D vector (same as operator*).
  Vector3<T> Transform(const Vector3<T>& v) const {
    return Vector3<T>(
        M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z,
        M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z,
        M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z);
  }

  Matrix3 Transposed() const {
    return Matrix3(M[0][0], M[1][0], M[2][0], M[0][1], M[1][1], M[2][1], M[0][2], M[1][2], M[2][2]);
  }

  void Transpose() {
    *this = Transposed();
  }

  // Determinant of the 3x3 submatrix selected by the given row and column
  // index arrays (each must point to 3 valid indices).
  T SubDet(const size_t* rows, const size_t* cols) const {
    return M[rows[0]][cols[0]] *
        (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) -
        M[rows[0]][cols[1]] *
        (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) +
        M[rows[0]][cols[2]] *
        (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
  }

  // M += a*b.t()
  inline void Rank1Add(const Vector3<T>& a, const Vector3<T>& b) {
    M[0][0] += a.x * b.x;
    M[0][1] += a.x * b.y;
    M[0][2] += a.x * b.z;
    M[1][0] += a.y * b.x;
    M[1][1] += a.y * b.y;
    M[1][2] += a.y * b.z;
    M[2][0] += a.z * b.x;
    M[2][1] += a.z * b.y;
    M[2][2] += a.z * b.z;
  }

  // M -= a*b.t()
  inline void Rank1Sub(const Vector3<T>& a, const Vector3<T>& b) {
    M[0][0] -= a.x * b.x;
    M[0][1] -= a.x * b.y;
    M[0][2] -= a.x * b.z;
    M[1][0] -= a.y * b.x;
    M[1][1] -= a.y * b.y;
    M[1][2] -= a.y * b.z;
    M[2][0] -= a.z * b.x;
    M[2][1] -= a.z * b.y;
    M[2][2] -= a.z * b.z;
  }

  // Column accessor (same as GetColumn).
  inline Vector3<T> Col(int c) const {
    return Vector3<T>(M[0][c], M[1][c], M[2][c]);
  }

  // Row accessor (same as GetRow).
  inline Vector3<T> Row(int r) const {
    return Vector3<T>(M[r][0], M[r][1], M[r][2]);
  }

  inline Vector3<T> GetColumn(int c) const {
    return Vector3<T>(M[0][c], M[1][c], M[2][c]);
  }

  inline Vector3<T> GetRow(int r) const {
    return Vector3<T>(M[r][0], M[r][1], M[r][2]);
  }

  inline void SetColumn(int c, const Vector3<T>& v) {
    M[0][c] = v.x;
    M[1][c] = v.y;
    M[2][c] = v.z;
  }

  inline void SetRow(int r, const Vector3<T>& v) {
    M[r][0] = v.x;
    M[r][1] = v.y;
    M[r][2] = v.z;
  }

  // Determinant by cofactor expansion along the first row.
  inline T Determinant() const {
    const Matrix3<T>& m = *this;
    T d;

    d = m.M[0][0] * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]);
    d -= m.M[0][1] * (m.M[1][0] * m.M[2][2] - m.M[1][2] * m.M[2][0]);
    d += m.M[0][2] * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]);

    return d;
  }

  // Inverse via the adjugate / determinant formula.
  // Asserts (in debug builds) if the matrix is singular.
  inline Matrix3<T> Inverse() const {
    Matrix3<T> a;
    const Matrix3<T>& m = *this;
    T d = Determinant();

    OVR_MATH_ASSERT(d != 0);
    T s = T(1) / d;

    a.M[0][0] = s * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]);
    a.M[1][0] = s * (m.M[1][2] * m.M[2][0] - m.M[1][0] * m.M[2][2]);
    a.M[2][0] = s * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]);

    a.M[0][1] = s * (m.M[0][2] * m.M[2][1] - m.M[0][1] * m.M[2][2]);
    a.M[1][1] = s * (m.M[0][0] * m.M[2][2] - m.M[0][2] * m.M[2][0]);
    a.M[2][1] = s * (m.M[0][1] * m.M[2][0] - m.M[0][0] * m.M[2][1]);

    a.M[0][2] = s * (m.M[0][1] * m.M[1][2] - m.M[0][2] * m.M[1][1]);
    a.M[1][2] = s * (m.M[0][2] * m.M[1][0] - m.M[0][0] * m.M[1][2]);
    a.M[2][2] = s * (m.M[0][0] * m.M[1][1] - m.M[0][1] * m.M[1][0]);

    return a;
  }

  // Outer Product of two column vectors: a * b.Transpose()
  static Matrix3 OuterProduct(const Vector3<T>& a, const Vector3<T>& b) {
    return Matrix3(
        a.x * b.x,
        a.x * b.y,
        a.x * b.z,
        a.y * b.x,
        a.y * b.y,
        a.y * b.z,
        a.z * b.x,
        a.z * b.y,
        a.z * b.z);
  }

  // Vector cross product as a premultiply matrix:
  // L.Cross(R) = LeftCrossAsMatrix(L) * R
  static Matrix3 LeftCrossAsMatrix(const Vector3<T>& L) {
    return Matrix3(T(0), -L.z, +L.y, +L.z, T(0), -L.x, -L.y, +L.x, T(0));
  }

  // Vector cross product as a premultiply matrix:
  // L.Cross(R) = RightCrossAsMatrix(R) * L
  static Matrix3 RightCrossAsMatrix(const Vector3<T>& R) {
    return Matrix3(T(0), +R.z, -R.y, -R.z, T(0), +R.x, +R.y, -R.x, T(0));
  }

  // Angle in radians of a rotation matrix
  // Uses identity trace(a) = 2*cos(theta) + 1
  // NOTE(review): meaningful only if *this is a proper rotation matrix;
  // Acos is presumably the project's clamped arc-cosine — confirm.
  T Angle() const {
    return Acos((Trace() - T(1)) * T(0.5));
  }

  // Angle in radians between two rotation matrices
  T Angle(const Matrix3& b) const {
    // Compute trace of (this->Transposed() * b)
    // This works out to sum of products of elements.
    T trace = T(0);
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        trace += M[i][j] * b.M[i][j];
      }
    }
    return Acos((trace - T(1)) * T(0.5));
  }
};
+\r
+typedef Matrix3<float> Matrix3f;\r
+typedef Matrix3<double> Matrix3d;\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Matrix2\r
+\r
+template <class T>\r
+class Matrix2 {\r
+ public:\r
+ typedef T ElementType;\r
+ static const size_t Dimension = 2;\r
+\r
+ T M[2][2];\r
+\r
+ enum NoInitType { NoInit };\r
+\r
+ // Construct with no memory initialization.\r
+ Matrix2(NoInitType) {}\r
+\r
+ // By default, we construct identity matrix.\r
+ Matrix2() {\r
+ M[0][0] = M[1][1] = T(1);\r
+ M[0][1] = M[1][0] = T(0);\r
+ }\r
+\r
+ Matrix2(T m11, T m12, T m21, T m22) {\r
+ M[0][0] = m11;\r
+ M[0][1] = m12;\r
+ M[1][0] = m21;\r
+ M[1][1] = m22;\r
+ }\r
+\r
+ // Construction from X, Y basis vectors\r
+ Matrix2(const Vector2<T>& xBasis, const Vector2<T>& yBasis) {\r
+ M[0][0] = xBasis.x;\r
+ M[0][1] = yBasis.x;\r
+ M[1][0] = xBasis.y;\r
+ M[1][1] = yBasis.y;\r
+ }\r
+\r
+ explicit Matrix2(T s) {\r
+ M[0][0] = M[1][1] = s;\r
+ M[0][1] = M[1][0] = T(0);\r
+ }\r
+\r
+ Matrix2(T m11, T m22) {\r
+ M[0][0] = m11;\r
+ M[0][1] = T(0);\r
+ M[1][0] = T(0);\r
+ M[1][1] = m22;\r
+ }\r
+\r
+ explicit Matrix2(const Matrix2<typename Math<T>::OtherFloatType>& src) {\r
+ M[0][0] = T(src.M[0][0]);\r
+ M[0][1] = T(src.M[0][1]);\r
+ M[1][0] = T(src.M[1][0]);\r
+ M[1][1] = T(src.M[1][1]);\r
+ }\r
+\r
+ // C-interop support\r
+ Matrix2(const typename CompatibleTypes<Matrix2<T>>::Type& s) {\r
+ OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix2), "sizeof(s) == sizeof(Matrix2)");\r
+ memcpy(M, s.M, sizeof(M));\r
+ }\r
+\r
+ operator const typename CompatibleTypes<Matrix2<T>>::Type() const {\r
+ typename CompatibleTypes<Matrix2<T>>::Type result;\r
+ OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix2), "sizeof(result) == sizeof(Matrix2)");\r
+ memcpy(result.M, M, sizeof(M));\r
+ return result;\r
+ }\r
+\r
+ T operator()(int i, int j) const {\r
+ return M[i][j];\r
+ }\r
+ T& operator()(int i, int j) {\r
+ return M[i][j];\r
+ }\r
+ const T* operator[](int i) const {\r
+ return M[i];\r
+ }\r
+ T* operator[](int i) {\r
+ return M[i];\r
+ }\r
+\r
+ static Matrix2 Identity() {\r
+ return Matrix2();\r
+ }\r
+\r
+ void SetIdentity() {\r
+ M[0][0] = M[1][1] = T(1);\r
+ M[0][1] = M[1][0] = T(0);\r
+ }\r
+\r
+ static Matrix2 Diagonal(T m00, T m11) {\r
+ return Matrix2(m00, m11);\r
+ }\r
+ static Matrix2 Diagonal(const Vector2<T>& v) {\r
+ return Matrix2(v.x, v.y);\r
+ }\r
+\r
+ T Trace() const {\r
+ return M[0][0] + M[1][1];\r
+ }\r
+\r
+ bool operator==(const Matrix2& b) const {\r
+ return M[0][0] == b.M[0][0] && M[0][1] == b.M[0][1] && M[1][0] == b.M[1][0] &&\r
+ M[1][1] == b.M[1][1];\r
+ }\r
+\r
+ Matrix2 operator+(const Matrix2& b) const {\r
+ return Matrix2(\r
+ M[0][0] + b.M[0][0], M[0][1] + b.M[0][1], M[1][0] + b.M[1][0], M[1][1] + b.M[1][1]);\r
+ }\r
+\r
+ Matrix2& operator+=(const Matrix2& b) {\r
+ M[0][0] += b.M[0][0];\r
+ M[0][1] += b.M[0][1];\r
+ M[1][0] += b.M[1][0];\r
+ M[1][1] += b.M[1][1];\r
+ return *this;\r
+ }\r
+\r
+ void operator=(const Matrix2& b) {\r
+ M[0][0] = b.M[0][0];\r
+ M[0][1] = b.M[0][1];\r
+ M[1][0] = b.M[1][0];\r
+ M[1][1] = b.M[1][1];\r
+ }\r
+\r
+ Matrix2 operator-(const Matrix2& b) const {\r
+ return Matrix2(\r
+ M[0][0] - b.M[0][0], M[0][1] - b.M[0][1], M[1][0] - b.M[1][0], M[1][1] - b.M[1][1]);\r
+ }\r
+\r
+ Matrix2& operator-=(const Matrix2& b) {\r
+ M[0][0] -= b.M[0][0];\r
+ M[0][1] -= b.M[0][1];\r
+ M[1][0] -= b.M[1][0];\r
+ M[1][1] -= b.M[1][1];\r
+ return *this;\r
+ }\r
+\r
+ Matrix2 operator*(const Matrix2& b) const {\r
+ return Matrix2(\r
+ M[0][0] * b.M[0][0] + M[0][1] * b.M[1][0],\r
+ M[0][0] * b.M[0][1] + M[0][1] * b.M[1][1],\r
+ M[1][0] * b.M[0][0] + M[1][1] * b.M[1][0],\r
+ M[1][0] * b.M[0][1] + M[1][1] * b.M[1][1]);\r
+ }\r
+\r
+ Matrix2& operator*=(const Matrix2& b) {\r
+ *this = *this * b;\r
+ return *this;\r
+ }\r
+\r
+ Matrix2 operator*(T s) const {\r
+ return Matrix2(M[0][0] * s, M[0][1] * s, M[1][0] * s, M[1][1] * s);\r
+ }\r
+\r
+ Matrix2& operator*=(T s) {\r
+ M[0][0] *= s;\r
+ M[0][1] *= s;\r
+ M[1][0] *= s;\r
+ M[1][1] *= s;\r
+ return *this;\r
+ }\r
+\r
+ Matrix2 operator/(T s) const {\r
+ return *this * (T(1) / s);\r
+ }\r
+\r
+ Matrix2& operator/=(T s) {\r
+ return *this *= (T(1) / s);\r
+ }\r
+\r
+ Vector2<T> operator*(const Vector2<T>& b) const {\r
+ return Vector2<T>(M[0][0] * b.x + M[0][1] * b.y, M[1][0] * b.x + M[1][1] * b.y);\r
+ }\r
+\r
+ Vector2<T> Transform(const Vector2<T>& v) const {\r
+ return Vector2<T>(M[0][0] * v.x + M[0][1] * v.y, M[1][0] * v.x + M[1][1] * v.y);\r
+ }\r
+\r
+ Matrix2 Transposed() const {\r
+ return Matrix2(M[0][0], M[1][0], M[0][1], M[1][1]);\r
+ }\r
+\r
+ void Transpose() {\r
+ OVRMath_Swap(M[1][0], M[0][1]);\r
+ }\r
+\r
+ Vector2<T> GetColumn(int c) const {\r
+ return Vector2<T>(M[0][c], M[1][c]);\r
+ }\r
+\r
+ Vector2<T> GetRow(int r) const {\r
+ return Vector2<T>(M[r][0], M[r][1]);\r
+ }\r
+\r
+ void SetColumn(int c, const Vector2<T>& v) {\r
+ M[0][c] = v.x;\r
+ M[1][c] = v.y;\r
+ }\r
+\r
+ void SetRow(int r, const Vector2<T>& v) {\r
+ M[r][0] = v.x;\r
+ M[r][1] = v.y;\r
+ }\r
+\r
+ T Determinant() const {\r
+ return M[0][0] * M[1][1] - M[0][1] * M[1][0];\r
+ }\r
+\r
+ Matrix2 Inverse() const {\r
+ T rcpDet = T(1) / Determinant();\r
+ return Matrix2(M[1][1] * rcpDet, -M[0][1] * rcpDet, -M[1][0] * rcpDet, M[0][0] * rcpDet);\r
+ }\r
+\r
+ // Outer Product of two column vectors: a * b.Transpose()\r
+ static Matrix2 OuterProduct(const Vector2<T>& a, const Vector2<T>& b) {\r
+ return Matrix2(a.x * b.x, a.x * b.y, a.y * b.x, a.y * b.y);\r
+ }\r
+\r
+ // Angle in radians between two rotation matrices\r
+ T Angle(const Matrix2& b) const {\r
+ const Matrix2& a = *this;\r
+ return Acos(a(0, 0) * b(0, 0) + a(1, 0) * b(1, 0));\r
+ }\r
+};\r
+\r
+typedef Matrix2<float> Matrix2f;\r
+typedef Matrix2<double> Matrix2d;\r
+\r
+//-------------------------------------------------------------------------------------\r
+\r
template <class T>
class SymMat3 {
 private:
  typedef SymMat3<T> this_type;

 public:
  typedef T Value_t;
  // Upper symmetric
  T v[6]; // _00 _01 _02 _11 _12 _22

  // Uninitialized by default.
  inline SymMat3() {}

  // Uniform-diagonal matrix: s on the diagonal, zeros elsewhere.
  inline explicit SymMat3(T s) {
    v[0] = v[3] = v[5] = s;
    v[1] = v[2] = v[4] = T(0);
  }

  // Element-wise construction of the upper triangle.
  inline explicit SymMat3(T a00, T a01, T a02, T a11, T a12, T a22) {
    v[0] = a00;
    v[1] = a01;
    v[2] = a02;
    v[3] = a11;
    v[4] = a12;
    v[5] = a22;
  }

  // Cast to symmetric Matrix3
  operator Matrix3<T>() const {
    return Matrix3<T>(v[0], v[1], v[2], v[1], v[3], v[4], v[2], v[4], v[5]);
  }

  // Maps (row, col) -> packed upper-triangular index; symmetric in i and j.
  static inline int Index(unsigned int i, unsigned int j) {
    return (i <= j) ? (3 * i - i * (i + 1) / 2 + j) : (3 * j - j * (j + 1) / 2 + i);
  }

  inline T operator()(int i, int j) const {
    return v[Index(i, j)];
  }

  inline T& operator()(int i, int j) {
    return v[Index(i, j)];
  }

  inline this_type& operator+=(const this_type& b) {
    v[0] += b.v[0];
    v[1] += b.v[1];
    v[2] += b.v[2];
    v[3] += b.v[3];
    v[4] += b.v[4];
    v[5] += b.v[5];
    return *this;
  }

  inline this_type& operator-=(const this_type& b) {
    v[0] -= b.v[0];
    v[1] -= b.v[1];
    v[2] -= b.v[2];
    v[3] -= b.v[3];
    v[4] -= b.v[4];
    v[5] -= b.v[5];

    return *this;
  }

  inline this_type& operator*=(T s) {
    v[0] *= s;
    v[1] *= s;
    v[2] *= s;
    v[3] *= s;
    v[4] *= s;
    v[5] *= s;

    return *this;
  }

  inline SymMat3 operator*(T s) const {
    SymMat3 d;
    d.v[0] = v[0] * s;
    d.v[1] = v[1] * s;
    d.v[2] = v[2] * s;
    d.v[3] = v[3] * s;
    d.v[4] = v[4] * s;
    d.v[5] = v[5] * s;

    return d;
  }

  // Multiplies two matrices into destination with minimum copying.
  // NOTE(review): this only accumulates products of the stored upper-triangle
  // elements — e.g. d->v[0] omits the a01*b01 and a02*b02 terms of a full
  // symmetric product, and a general product of two symmetric matrices is not
  // itself symmetric. For the full product see operator*(SymMat3, SymMat3),
  // which returns a Matrix3. Confirm intended semantics before relying on this.
  static SymMat3& Multiply(SymMat3* d, const SymMat3& a, const SymMat3& b) {
    // _00 _01 _02 _11 _12 _22

    d->v[0] = a.v[0] * b.v[0];
    d->v[1] = a.v[0] * b.v[1] + a.v[1] * b.v[3];
    d->v[2] = a.v[0] * b.v[2] + a.v[1] * b.v[4];

    d->v[3] = a.v[3] * b.v[3];
    d->v[4] = a.v[3] * b.v[4] + a.v[4] * b.v[5];

    d->v[5] = a.v[5] * b.v[5];

    return *d;
  }

  // Determinant by cofactor expansion along the first row.
  inline T Determinant() const {
    const this_type& m = *this;
    T d;

    d = m(0, 0) * (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
    d -= m(0, 1) * (m(1, 0) * m(2, 2) - m(1, 2) * m(2, 0));
    d += m(0, 2) * (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));

    return d;
  }

  // Inverse via the adjugate / determinant formula; only the upper triangle
  // is written (sufficient, since the inverse of a symmetric matrix is
  // symmetric). Asserts (in debug builds) if singular.
  inline this_type Inverse() const {
    this_type a;
    const this_type& m = *this;
    T d = Determinant();

    OVR_MATH_ASSERT(d != 0);
    T s = T(1) / d;

    a(0, 0) = s * (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));

    a(0, 1) = s * (m(0, 2) * m(2, 1) - m(0, 1) * m(2, 2));
    a(1, 1) = s * (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));

    a(0, 2) = s * (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
    a(1, 2) = s * (m(0, 2) * m(1, 0) - m(0, 0) * m(1, 2));
    a(2, 2) = s * (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));

    return a;
  }

  // Sum of diagonal elements.
  inline T Trace() const {
    return v[0] + v[3] + v[5];
  }

  // M = a*a.t()
  inline void Rank1(const Vector3<T>& a) {
    v[0] = a.x * a.x;
    v[1] = a.x * a.y;
    v[2] = a.x * a.z;
    v[3] = a.y * a.y;
    v[4] = a.y * a.z;
    v[5] = a.z * a.z;
  }

  // M += a*a.t()
  inline void Rank1Add(const Vector3<T>& a) {
    v[0] += a.x * a.x;
    v[1] += a.x * a.y;
    v[2] += a.x * a.z;
    v[3] += a.y * a.y;
    v[4] += a.y * a.z;
    v[5] += a.z * a.z;
  }

  // M -= a*a.t()
  inline void Rank1Sub(const Vector3<T>& a) {
    v[0] -= a.x * a.x;
    v[1] -= a.x * a.y;
    v[2] -= a.x * a.z;
    v[3] -= a.y * a.y;
    v[4] -= a.y * a.z;
    v[5] -= a.z * a.z;
  }
};
+\r
// Convenience aliases for the common element types.
typedef SymMat3<float> SymMat3f;
typedef SymMat3<double> SymMat3d;
+\r
+template <class T>\r
+inline Matrix3<T> operator*(const SymMat3<T>& a, const SymMat3<T>& b) {\r
+#define AJB_ARBC(r, c) (a(r, 0) * b(0, c) + a(r, 1) * b(1, c) + a(r, 2) * b(2, c))\r
+ return Matrix3<T>(\r
+ AJB_ARBC(0, 0),\r
+ AJB_ARBC(0, 1),\r
+ AJB_ARBC(0, 2),\r
+ AJB_ARBC(1, 0),\r
+ AJB_ARBC(1, 1),\r
+ AJB_ARBC(1, 2),\r
+ AJB_ARBC(2, 0),\r
+ AJB_ARBC(2, 1),\r
+ AJB_ARBC(2, 2));\r
+#undef AJB_ARBC\r
+}\r
+\r
+template <class T>\r
+inline Matrix3<T> operator*(const Matrix3<T>& a, const SymMat3<T>& b) {\r
+#define AJB_ARBC(r, c) (a(r, 0) * b(0, c) + a(r, 1) * b(1, c) + a(r, 2) * b(2, c))\r
+ return Matrix3<T>(\r
+ AJB_ARBC(0, 0),\r
+ AJB_ARBC(0, 1),\r
+ AJB_ARBC(0, 2),\r
+ AJB_ARBC(1, 0),\r
+ AJB_ARBC(1, 1),\r
+ AJB_ARBC(1, 2),\r
+ AJB_ARBC(2, 0),\r
+ AJB_ARBC(2, 1),\r
+ AJB_ARBC(2, 2));\r
+#undef AJB_ARBC\r
+}\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Angle\r
+\r
+// Cleanly representing the algebra of 2D rotations.\r
+// The operations maintain the angle between -Pi and Pi, the same range as atan2.\r
+\r
+template <class T>\r
+class Angle {\r
+ public:\r
+ enum AngularUnits { Radians = 0, Degrees = 1 };\r
+\r
+ Angle() : a(0) {}\r
+\r
+ // Fix the range to be between -Pi and Pi\r
+ Angle(T a_, AngularUnits u = Radians)\r
+ : a((u == Radians) ? a_ : a_ * ((T)MATH_DOUBLE_DEGREETORADFACTOR)) {\r
+ FixRange();\r
+ }\r
+\r
+ T Get(AngularUnits u = Radians) const {\r
+ return (u == Radians) ? a : a * ((T)MATH_DOUBLE_RADTODEGREEFACTOR);\r
+ }\r
+ void Set(const T& x, AngularUnits u = Radians) {\r
+ a = (u == Radians) ? x : x * ((T)MATH_DOUBLE_DEGREETORADFACTOR);\r
+ FixRange();\r
+ }\r
+ int Sign() const {\r
+ if (a == 0)\r
+ return 0;\r
+ else\r
+ return (a > 0) ? 1 : -1;\r
+ }\r
+ T Abs() const {\r
+ return (a >= 0) ? a : -a;\r
+ }\r
+\r
+ bool operator==(const Angle& b) const {\r
+ return a == b.a;\r
+ }\r
+ bool operator!=(const Angle& b) const {\r
+ return a != b.a;\r
+ }\r
+ // bool operator< (const Angle& b) const { return a < a.b; }\r
+ // bool operator> (const Angle& b) const { return a > a.b; }\r
+ // bool operator<= (const Angle& b) const { return a <= a.b; }\r
+ // bool operator>= (const Angle& b) const { return a >= a.b; }\r
+ // bool operator= (const T& x) { a = x; FixRange(); }\r
+\r
+ // These operations assume a is already between -Pi and Pi.\r
+ Angle& operator+=(const Angle& b) {\r
+ a = a + b.a;\r
+ FastFixRange();\r
+ return *this;\r
+ }\r
+ Angle& operator+=(const T& x) {\r
+ a = a + x;\r
+ FixRange();\r
+ return *this;\r
+ }\r
+ Angle operator+(const Angle& b) const {\r
+ Angle res = *this;\r
+ res += b;\r
+ return res;\r
+ }\r
+ Angle operator+(const T& x) const {\r
+ Angle res = *this;\r
+ res += x;\r
+ return res;\r
+ }\r
+ Angle& operator-=(const Angle& b) {\r
+ a = a - b.a;\r
+ FastFixRange();\r
+ return *this;\r
+ }\r
+ Angle& operator-=(const T& x) {\r
+ a = a - x;\r
+ FixRange();\r
+ return *this;\r
+ }\r
+ Angle operator-(const Angle& b) const {\r
+ Angle res = *this;\r
+ res -= b;\r
+ return res;\r
+ }\r
+ Angle operator-(const T& x) const {\r
+ Angle res = *this;\r
+ res -= x;\r
+ return res;\r
+ }\r
+\r
+ T Distance(const Angle& b) {\r
+ T c = fabs(a - b.a);\r
+ return (c <= ((T)MATH_DOUBLE_PI)) ? c : ((T)MATH_DOUBLE_TWOPI) - c;\r
+ }\r
+\r
+ private:\r
+ // The stored angle, which should be maintained between -Pi and Pi\r
+ T a;\r
+\r
+ // Fixes the angle range to [-Pi,Pi], but assumes no more than 2Pi away on either side\r
+ inline void FastFixRange() {\r
+ if (a < -((T)MATH_DOUBLE_PI))\r
+ a += ((T)MATH_DOUBLE_TWOPI);\r
+ else if (a > ((T)MATH_DOUBLE_PI))\r
+ a -= ((T)MATH_DOUBLE_TWOPI);\r
+ }\r
+\r
+ // Fixes the angle range to [-Pi,Pi] for any given range, but slower then the fast method\r
+ inline void FixRange() {\r
+ // do nothing if the value is already in the correct range, since fmod call is expensive\r
+ if (a >= -((T)MATH_DOUBLE_PI) && a <= ((T)MATH_DOUBLE_PI))\r
+ return;\r
+ a = fmod(a, ((T)MATH_DOUBLE_TWOPI));\r
+ if (a < -((T)MATH_DOUBLE_PI))\r
+ a += ((T)MATH_DOUBLE_TWOPI);\r
+ else if (a > ((T)MATH_DOUBLE_PI))\r
+ a -= ((T)MATH_DOUBLE_TWOPI);\r
+ }\r
+};\r
+\r
+typedef Angle<float> Anglef;\r
+typedef Angle<double> Angled;\r
+\r
+//-------------------------------------------------------------------------------------\r
+// ***** Plane\r
+\r
+// Consists of a normal vector and distance from the origin where the plane is located.\r
+\r
+template <class T>\r
+class Plane {\r
+ public:\r
+ Vector3<T> N;\r
+ T D;\r
+\r
+ Plane() : D(0) {}\r
+\r
+ // Normals must already be normalized\r
+ Plane(const Vector3<T>& n, T d) : N(n), D(d) {}\r
+ Plane(T x, T y, T z, T d) : N(x, y, z), D(d) {}\r
+\r
+ // construct from a point on the plane and the normal\r
+ Plane(const Vector3<T>& p, const Vector3<T>& n) : N(n), D(-(p.Dot(n))) {}\r
+\r
+ // Find the point to plane distance. The sign indicates what side of the plane the point is on (0\r
+ // = point on plane).\r
+ T TestSide(const Vector3<T>& p) const {\r
+ return (N.Dot(p)) + D;\r
+ }\r
+\r
+ Plane<T> Flipped() const {\r
+ return Plane(-N, -D);\r
+ }\r
+\r
+ void Flip() {\r
+ N = -N;\r
+ D = -D;\r
+ }\r
+\r
+ bool operator==(const Plane<T>& rhs) const {\r
+ return (this->D == rhs.D && this->N == rhs.N);\r
+ }\r
+};\r
+\r
// Convenience aliases for the common element types.
typedef Plane<float> Planef;
typedef Plane<double> Planed;
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** ScaleAndOffset2D\r
+\r
// A 2D per-axis scale and offset pair, e.g. the mapping from eye tan-angle
// space to NDC produced by FovPort::CreateNDCScaleAndOffsetFromFov.
struct ScaleAndOffset2D {
  Vector2f Scale; // per-axis scale factor
  Vector2f Offset; // per-axis offset, applied after the scale

  ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f)
      : Scale(sx, sy), Offset(ox, oy) {}
};
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** FovPort\r
+\r
+// FovPort describes Field Of View (FOV) of a viewport.\r
+// This class has values for up, down, left and right, stored in\r
+// tangent of the angle units to simplify calculations.\r
+//\r
+// As an example, for a standard 90 degree vertical FOV, we would\r
+// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.\r
+//\r
+// CreateFromRadians/Degrees helper functions can be used to\r
+// access FOV in different units.\r
+\r
+// ***** FovPort\r
+\r
+struct FovPort {\r
+ float UpTan;\r
+ float DownTan;\r
+ float LeftTan;\r
+ float RightTan;\r
+\r
+ FovPort(float sideTan = 0.0f)\r
+ : UpTan(sideTan), DownTan(sideTan), LeftTan(sideTan), RightTan(sideTan) {}\r
+ FovPort(float u, float d, float l, float r) : UpTan(u), DownTan(d), LeftTan(l), RightTan(r) {}\r
+\r
+#ifndef OVR_EXCLUDE_CAPI_FROM_MATH\r
+ // C-interop support.\r
+ typedef CompatibleTypes<FovPort>::Type CompatibleType;\r
+\r
+ FovPort(const CompatibleType& s)\r
+ : UpTan(s.UpTan), DownTan(s.DownTan), LeftTan(s.LeftTan), RightTan(s.RightTan) {}\r
+\r
+ operator const CompatibleType&() const {\r
+ OVR_MATH_STATIC_ASSERT(sizeof(FovPort) == sizeof(CompatibleType), "sizeof(FovPort) failure");\r
+ return reinterpret_cast<const CompatibleType&>(*this);\r
+ }\r
+#endif\r
+\r
+ static FovPort CreateFromRadians(float horizontalFov, float verticalFov) {\r
+ FovPort result;\r
+ result.UpTan = tanf(verticalFov * 0.5f);\r
+ result.DownTan = tanf(verticalFov * 0.5f);\r
+ result.LeftTan = tanf(horizontalFov * 0.5f);\r
+ result.RightTan = tanf(horizontalFov * 0.5f);\r
+ return result;\r
+ }\r
+\r
+ static FovPort CreateFromDegrees(float horizontalFovDegrees, float verticalFovDegrees) {\r
+ return CreateFromRadians(DegreeToRad(horizontalFovDegrees), DegreeToRad(verticalFovDegrees));\r
+ }\r
+\r
+ // Get Horizontal/Vertical components of Fov in radians.\r
+ float GetVerticalFovRadians() const {\r
+ return atanf(UpTan) + atanf(DownTan);\r
+ }\r
+ float GetHorizontalFovRadians() const {\r
+ return atanf(LeftTan) + atanf(RightTan);\r
+ }\r
+ // Get Horizontal/Vertical components of Fov in degrees.\r
+ float GetVerticalFovDegrees() const {\r
+ return RadToDegree(GetVerticalFovRadians());\r
+ }\r
+ float GetHorizontalFovDegrees() const {\r
+ return RadToDegree(GetHorizontalFovRadians());\r
+ }\r
+\r
+ // Compute maximum tangent value among all four sides.\r
+ float GetMaxSideTan() const {\r
+ return OVRMath_Max(OVRMath_Max(UpTan, DownTan), OVRMath_Max(LeftTan, RightTan));\r
+ }\r
+\r
+ static ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort tanHalfFov) {\r
+ float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan);\r
+ float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f;\r
+ float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan);\r
+ float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f;\r
+\r
+ ScaleAndOffset2D result;\r
+ result.Scale = Vector2f(projXScale, projYScale);\r
+ result.Offset = Vector2f(projXOffset, projYOffset);\r
+ // Hey - why is that Y.Offset negated?\r
+ // It's because a projection matrix transforms from world coords with Y=up,\r
+ // whereas this is from NDC which is Y=down.\r
+\r
+ return result;\r
+ }\r
+\r
+ // Converts Fov Tan angle units to [-1,1] render target NDC space\r
+ Vector2f TanAngleToRendertargetNDC(Vector2f const& tanEyeAngle) {\r
+ ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(*this);\r
+ return tanEyeAngle * eyeToSourceNDC.Scale + eyeToSourceNDC.Offset;\r
+ }\r
+\r
+ // Compute per-channel minimum and maximum of Fov.\r
+ static FovPort Min(const FovPort& a, const FovPort& b) {\r
+ FovPort fov(\r
+ OVRMath_Min(a.UpTan, b.UpTan),\r
+ OVRMath_Min(a.DownTan, b.DownTan),\r
+ OVRMath_Min(a.LeftTan, b.LeftTan),\r
+ OVRMath_Min(a.RightTan, b.RightTan));\r
+ return fov;\r
+ }\r
+\r
+ static FovPort Max(const FovPort& a, const FovPort& b) {\r
+ FovPort fov(\r
+ OVRMath_Max(a.UpTan, b.UpTan),\r
+ OVRMath_Max(a.DownTan, b.DownTan),\r
+ OVRMath_Max(a.LeftTan, b.LeftTan),\r
+ OVRMath_Max(a.RightTan, b.RightTan));\r
+ return fov;\r
+ }\r
+\r
+ static FovPort Uncant(const FovPort& cantedFov, Quatf canting) {\r
+ FovPort uncantedFov = cantedFov;\r
+\r
+ // make 3D vectors from the FovPorts projected to z=1 plane\r
+ Vector3f leftUp = Vector3f(cantedFov.LeftTan, cantedFov.UpTan, 1.0f);\r
+ Vector3f rightUp = Vector3f(-cantedFov.RightTan, cantedFov.UpTan, 1.0f);\r
+ Vector3f leftDown = Vector3f(cantedFov.LeftTan, -cantedFov.DownTan, 1.0f);\r
+ Vector3f rightDown = Vector3f(-cantedFov.RightTan, -cantedFov.DownTan, 1.0f);\r
+\r
+ // rotate these vectors using the canting specified\r
+ leftUp = canting.Rotate(leftUp);\r
+ rightUp = canting.Rotate(rightUp);\r
+ leftDown = canting.Rotate(leftDown);\r
+ rightDown = canting.Rotate(rightDown);\r
+\r
+ // If the z coordinates of any of the corners end up being really small or negative, then\r
+ // projection will generate extremely large or inverted frustums and we don't really want that\r
+ const float kMinValidZ = 0.01f;\r
+\r
+ // re-project back to z=1 plane while making sure we don't generate gigantic values (hence max)\r
+ leftUp /= OVRMath_Max(leftUp.z, kMinValidZ);\r
+ rightUp /= OVRMath_Max(rightUp.z, kMinValidZ);\r
+ leftDown /= OVRMath_Max(leftDown.z, kMinValidZ);\r
+ rightDown /= OVRMath_Max(rightDown.z, kMinValidZ);\r
+\r
+ // generate new FovTans as "bounding box" values\r
+ uncantedFov.UpTan = OVRMath_Max(leftUp.y, rightUp.y);\r
+ uncantedFov.DownTan = OVRMath_Max(-leftDown.y, -rightDown.y);\r
+ uncantedFov.LeftTan = OVRMath_Max(leftUp.x, leftDown.x);\r
+ uncantedFov.RightTan = OVRMath_Max(-rightDown.x, -rightUp.x);\r
+\r
+ return uncantedFov;\r
+ }\r
+\r
+ template <class T>\r
+ static FovPort ScaleFovPort(const FovPort& fov, OVR::Vector2<T> scaleFactors) {\r
+ FovPort retFov = FovPort(fov);\r
+ retFov.LeftTan *= ((scaleFactors.x != 0.0) ? scaleFactors.x : 1.0f);\r
+ retFov.RightTan *= ((scaleFactors.x != 0.0) ? scaleFactors.x : 1.0f);\r
+ retFov.UpTan *= ((scaleFactors.y != 0.0) ? scaleFactors.y : 1.0f);\r
+ retFov.DownTan *= ((scaleFactors.y != 0.0) ? scaleFactors.y : 1.0f);\r
+ return retFov;\r
+ }\r
+};\r
+\r
+} // Namespace OVR\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning(pop)\r
+#endif\r
+\r
+#endif\r
--- /dev/null
+/************************************************************************************\r
+\r
+Filename : OVR_StereoProjection.h\r
+Content : Stereo projection functions\r
+Created : November 30, 2013\r
+Authors : Tom Fosyth\r
+\r
+Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+\r
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");\r
+you may not use the Oculus VR Rift SDK except in compliance with the License,\r
+which is provided at the time of installation or download, or which\r
+otherwise accompanies this software in either electronic or hard copy form.\r
+\r
+You may obtain a copy of the License at\r
+\r
+http://www.oculusvr.com/licenses/LICENSE-3.3\r
+\r
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK\r
+distributed under the License is distributed on an "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+See the License for the specific language governing permissions and\r
+limitations under the License.\r
+\r
+*************************************************************************************/\r
+\r
+#ifndef OVR_StereoProjection_h\r
+#define OVR_StereoProjection_h\r
+\r
+#include "Extras/OVR_Math.h"\r
+\r
+namespace OVR {\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Stereo Enumerations\r
+\r
+// StereoEye specifies which eye we are rendering for; it is used to\r
+// retrieve StereoEyeParams.\r
+enum StereoEye { StereoEye_Left, StereoEye_Right, StereoEye_Center };\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Projection functions
+\r
// Builds the eye projection matrix for the given FOV port.
// zNear/zFar are the clip-plane distances. flipZ and farAtInfinity appear to
// correspond to the ovrProjection_FarLessThanNear / ovrProjection_FarClipAtInfinity
// modifiers (reversed-Z, infinite far plane) -- confirm against the implementation.
Matrix4f CreateProjection(
    bool rightHanded,
    bool isOpenGL,
    FovPort fov,
    StereoEye eye,
    float zNear = 0.01f,
    float zFar = 10000.0f,
    bool flipZ = false,
    bool farAtInfinity = false);

// Builds an orthographic sub-projection, presumably for rendering 2D content
// at a given distance within an existing per-eye perspective `projection`
// -- confirm parameter semantics against the implementation.
Matrix4f CreateOrthoSubProjection(
    bool rightHanded,
    StereoEye eyeType,
    float tanHalfFovX,
    float tanHalfFovY,
    float unitsX,
    float unitsY,
    float distanceFromCamera,
    float interpupillaryDistance,
    Matrix4f const& projection,
    float zNear = 0.0f,
    float zFar = 0.0f,
    bool flipZ = false,
    bool farAtInfinity = false);

// Computes the scale/offset pair mapping tan-angle space to NDC for `fov`
// (see FovPort::CreateNDCScaleAndOffsetFromFov in OVR_Math.h).
ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort fov);
+\r
+} // namespace OVR\r
+\r
+#endif // OVR_StereoProjection_h\r
--- /dev/null
+/************************************************************************************\r
+ \file OVR_CAPI.h\r
+ \brief C Interface to the Oculus PC SDK tracking and rendering library.\r
+ \copyright Copyright 2014 Oculus VR, LLC All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+// We don't use version numbers within OVR_CAPI_h, as all versioned variations\r
+// of this file are currently mutually exclusive.\r
+#ifndef OVR_CAPI_h\r
+#define OVR_CAPI_h\r
+\r
+#include "OVR_CAPI_Keys.h"\r
+#include "OVR_Version.h"\r
+#include "OVR_ErrorCode.h"\r
+\r
+#if !defined(_WIN32)\r
+#include <sys/types.h>\r
+#endif\r
+\r
+\r
+#include <stdint.h>\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning(push)\r
+#pragma warning(disable : 4324) // structure was padded due to __declspec(align())\r
+#pragma warning(disable : 4359) // The alignment specified for a type is less than the\r
+// alignment of the type of one of its data members\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_OS\r
+//\r
+#if !defined(OVR_OS_WIN32) && defined(_WIN32)\r
+#define OVR_OS_WIN32\r
+#endif\r
+\r
+#if !defined(OVR_OS_MAC) && defined(__APPLE__)\r
+#define OVR_OS_MAC\r
+#endif\r
+\r
+#if !defined(OVR_OS_LINUX) && defined(__linux__)\r
+#define OVR_OS_LINUX\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_CPP\r
+//\r
+#if !defined(OVR_CPP)\r
+#if defined(__cplusplus)\r
+#define OVR_CPP(x) x\r
+#else\r
+#define OVR_CPP(x) /* Not C++ */\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_CDECL\r
+//\r
+/// LibOVR calling convention for 32-bit Windows builds.\r
+//\r
+#if !defined(OVR_CDECL)\r
+#if defined(_WIN32)\r
+#define OVR_CDECL __cdecl\r
+#else\r
+#define OVR_CDECL\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_EXTERN_C\r
+//\r
+/// Defined as extern "C" when built from C++ code.\r
+//\r
+#if !defined(OVR_EXTERN_C)\r
+#ifdef __cplusplus\r
+#define OVR_EXTERN_C extern "C"\r
+#else\r
+#define OVR_EXTERN_C\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_PUBLIC_FUNCTION / OVR_PRIVATE_FUNCTION\r
+//\r
+// OVR_PUBLIC_FUNCTION - Functions that externally visible from a shared library.\r
+// Corresponds to Microsoft __dllexport.\r
+// OVR_PUBLIC_CLASS - C++ structs and classes that are externally visible from a\r
+// shared library. Corresponds to Microsoft __dllexport.\r
+// OVR_PRIVATE_FUNCTION - Functions that are not visible outside of a shared library.\r
+// They are private to the shared library.\r
+// OVR_PRIVATE_CLASS - C++ structs and classes that are not visible outside of a\r
+// shared library. They are private to the shared library.\r
+//\r
+// OVR_DLL_BUILD - Used to indicate that the current compilation unit is of a shared library.\r
+// OVR_DLL_IMPORT - Used to indicate that the current compilation unit is a\r
+// user of the corresponding shared library.\r
+// OVR_STATIC_BUILD - used to indicate that the current compilation unit is not a\r
+// shared library but rather statically linked code.\r
+//\r
+#if !defined(OVR_PUBLIC_FUNCTION)\r
+#if defined(OVR_DLL_BUILD)\r
+#if defined(_WIN32)\r
+#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllexport) rval OVR_CDECL\r
+#define OVR_PUBLIC_CLASS __declspec(dllexport)\r
+#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL\r
+#define OVR_PRIVATE_CLASS\r
+#else\r
+#define OVR_PUBLIC_FUNCTION(rval) \\r
+ OVR_EXTERN_C __attribute__((visibility("default"))) rval OVR_CDECL /* Requires GCC 4.0+ */\r
+#define OVR_PUBLIC_CLASS __attribute__((visibility("default"))) /* Requires GCC 4.0+ */\r
+#define OVR_PRIVATE_FUNCTION(rval) __attribute__((visibility("hidden"))) rval OVR_CDECL\r
+#define OVR_PRIVATE_CLASS __attribute__((visibility("hidden")))\r
+#endif\r
+#elif defined(OVR_DLL_IMPORT)\r
+#if defined(_WIN32)\r
+#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllimport) rval OVR_CDECL\r
+#define OVR_PUBLIC_CLASS __declspec(dllimport)\r
+#else\r
+#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL\r
+#define OVR_PUBLIC_CLASS\r
+#endif\r
+#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL\r
+#define OVR_PRIVATE_CLASS\r
+#else // OVR_STATIC_BUILD\r
+#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL\r
+#define OVR_PUBLIC_CLASS\r
+#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL\r
+#define OVR_PRIVATE_CLASS\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_EXPORT\r
+//\r
+/// Provided for backward compatibility with older versions of this library.\r
+//\r
+#if !defined(OVR_EXPORT)\r
+#ifdef OVR_OS_WIN32\r
+#define OVR_EXPORT __declspec(dllexport)\r
+#else\r
+#define OVR_EXPORT\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_ALIGNAS\r
+//\r
+#if !defined(OVR_ALIGNAS)\r
+#if defined(__GNUC__) || defined(__clang__)\r
+#define OVR_ALIGNAS(n) __attribute__((aligned(n)))\r
+#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)\r
+#define OVR_ALIGNAS(n) __declspec(align(n))\r
+#elif defined(__CC_ARM)\r
+#define OVR_ALIGNAS(n) __align(n)\r
+#else\r
+#error Need to define OVR_ALIGNAS\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_CC_HAS_FEATURE\r
+//\r
+// This is a portable way to use compile-time feature identification available\r
+// with some compilers in a clean way. Direct usage of __has_feature in preprocessing\r
+// statements of non-supporting compilers results in a preprocessing error.\r
+//\r
+// Example usage:\r
+// #if OVR_CC_HAS_FEATURE(is_pod)\r
+// if(__is_pod(T)) // If the type is plain data then we can safely memcpy it.\r
+// memcpy(&destObject, &srcObject, sizeof(object));\r
+// #endif\r
+//\r
+#if !defined(OVR_CC_HAS_FEATURE)\r
+#if defined(__clang__) // http://clang.llvm.org/docs/LanguageExtensions.html#id2\r
+#define OVR_CC_HAS_FEATURE(x) __has_feature(x)\r
+#else\r
+#define OVR_CC_HAS_FEATURE(x) 0\r
+#endif\r
+#endif\r
+\r
+// ------------------------------------------------------------------------\r
+// ***** OVR_STATIC_ASSERT\r
+//\r
+// Portable support for C++11 static_assert().\r
+// Acts as if the following were declared:\r
+// void OVR_STATIC_ASSERT(bool const_expression, const char* msg);\r
+//\r
+// Example usage:\r
+// OVR_STATIC_ASSERT(sizeof(int32_t) == 4, "int32_t expected to be 4 bytes.");\r
+\r
+#if !defined(OVR_STATIC_ASSERT)\r
+#if !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* Other */ && \\r
+ !(defined(__GXX_EXPERIMENTAL_CXX0X__)) /* GCC */ && \\r
+ !(defined(__clang__) && defined(__cplusplus) && \\r
+ OVR_CC_HAS_FEATURE(cxx_static_assert)) /* clang */ \\r
+ && !(defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)) /* VS2010+ */\r
+\r
+#if !defined(OVR_SA_UNUSED)\r
+#if defined(OVR_CC_GNU) || defined(OVR_CC_CLANG)\r
+#define OVR_SA_UNUSED __attribute__((unused))\r
+#else\r
+#define OVR_SA_UNUSED\r
+#endif\r
+#define OVR_SA_PASTE(a, b) a##b\r
+#define OVR_SA_HELP(a, b) OVR_SA_PASTE(a, b)\r
+#endif\r
+\r
+#if defined(__COUNTER__)\r
+#define OVR_STATIC_ASSERT(expression, msg) \\r
+ typedef char OVR_SA_HELP(staticAssert, __COUNTER__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED\r
+#else\r
+#define OVR_STATIC_ASSERT(expression, msg) \\r
+ typedef char OVR_SA_HELP(staticAssert, __LINE__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED\r
+#endif\r
+\r
+#else\r
+#define OVR_STATIC_ASSERT(expression, msg) static_assert(expression, msg)\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Padding\r
+//\r
+/// Defines explicitly unused space for a struct.\r
+/// When used correctly, usage of this macro should not change the size of the struct.
+/// Compile-time and runtime behavior with and without this defined should be identical.\r
+///\r
+#if !defined(OVR_UNUSED_STRUCT_PAD)\r
+#define OVR_UNUSED_STRUCT_PAD(padName, size) char padName[size];\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Word Size\r
+//\r
+/// Specifies the size of a pointer on the given platform.\r
+///\r
+#if !defined(OVR_PTR_SIZE)\r
+#if defined(__WORDSIZE)\r
+#define OVR_PTR_SIZE ((__WORDSIZE) / 8)\r
+#elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || \\r
+ defined(__ia64__) || defined(__arch64__) || defined(__64BIT__) || defined(__Ptr_Is_64)\r
+#define OVR_PTR_SIZE 8\r
+#elif defined(__CC_ARM) && (__sizeof_ptr == 8)\r
+#define OVR_PTR_SIZE 8\r
+#else\r
+#define OVR_PTR_SIZE 4\r
+#endif\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_ON32 / OVR_ON64\r
+//\r
+#if OVR_PTR_SIZE == 8\r
+#define OVR_ON32(x)\r
+#define OVR_ON64(x) x\r
+#else\r
+#define OVR_ON32(x) x\r
+#define OVR_ON64(x)\r
+#endif\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** ovrBool\r
+\r
+typedef char ovrBool; ///< Boolean type\r
+#define ovrFalse 0 ///< ovrBool value of false.\r
+#define ovrTrue 1 ///< ovrBool value of true.\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Simple Math Structures\r
+\r
/// A RGBA color with normalized float components.
typedef struct OVR_ALIGNAS(4) ovrColorf_ {
  float r, g, b, a; ///< Red, green, blue and alpha channels, nominally in [0, 1] (not enforced here).
} ovrColorf;
+\r
/// A 2D vector with integer components.
typedef struct OVR_ALIGNAS(4) ovrVector2i_ {
  int x, y; ///< Horizontal and vertical components.
} ovrVector2i;
+\r
/// A 2D size with integer components.
typedef struct OVR_ALIGNAS(4) ovrSizei_ {
  int w, h; ///< Width and height.
} ovrSizei;
+\r
/// A 2D rectangle with a position and size.
/// All components are integers.
typedef struct OVR_ALIGNAS(4) ovrRecti_ {
  ovrVector2i Pos; ///< Position of the rectangle's origin corner.
  ovrSizei Size; ///< Rectangle dimensions.
} ovrRecti;
+\r
/// A quaternion rotation.
typedef struct OVR_ALIGNAS(4) ovrQuatf_ {
  float x, y, z, w; ///< Quaternion components; conventionally w is the scalar part -- confirm against OVR::Quat.
} ovrQuatf;
+\r
/// A 2D vector with float components.
typedef struct OVR_ALIGNAS(4) ovrVector2f_ {
  float x, y; ///< Horizontal and vertical components.
} ovrVector2f;
+\r
/// A 3D vector with float components.
typedef struct OVR_ALIGNAS(4) ovrVector3f_ {
  float x, y, z; ///< Cartesian components.
} ovrVector3f;
+\r
/// A 4x4 matrix with float elements.
typedef struct OVR_ALIGNAS(4) ovrMatrix4f_ {
  float M[4][4]; ///< Elements addressed as M[i][j]; storage convention presumably matches OVR::Matrix4f -- confirm.
} ovrMatrix4f;
+\r
/// Position and orientation together.
/// The coordinate system used is right-handed Cartesian.
typedef struct OVR_ALIGNAS(4) ovrPosef_ {
  ovrQuatf Orientation; ///< Rotation component of the pose.
  ovrVector3f Position; ///< Translation component of the pose.
} ovrPosef;
+\r
/// A full pose (rigid body) configuration with first and second derivatives.
///
/// Body refers to any object for which ovrPoseStatef is providing data.
/// It can be the HMD, Touch controller, sensor or something else. The context
/// depends on the usage of the struct.
typedef struct OVR_ALIGNAS(8) ovrPoseStatef_ {
  ovrPosef ThePose; ///< Position and orientation.
  ovrVector3f AngularVelocity; ///< Angular velocity in radians per second.
  ovrVector3f LinearVelocity; ///< Velocity in meters per second.
  ovrVector3f AngularAcceleration; ///< Angular acceleration in radians per second per second.
  ovrVector3f LinearAcceleration; ///< Acceleration in meters per second per second.
  OVR_UNUSED_STRUCT_PAD(pad0, 4) ///< \internal struct pad; keeps TimeInSeconds 8-byte aligned.
  double TimeInSeconds; ///< Absolute time that this pose refers to. \see ovr_GetTimeInSeconds
} ovrPoseStatef;
+\r
/// Describes the up, down, left, and right angles of the field of view.
///
/// Field Of View (FOV) tangent of the angle units.
/// \note For a standard 90 degree vertical FOV, we would
/// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.
/// C-layout counterpart of OVR::FovPort (bridged via CompatibleTypes interop).
typedef struct OVR_ALIGNAS(4) ovrFovPort_ {
  float UpTan; ///< Tangent of the angle between the viewing vector and top edge of the FOV.
  float DownTan; ///< Tangent of the angle between the viewing vector and bottom edge of the FOV.
  float LeftTan; ///< Tangent of the angle between the viewing vector and left edge of the FOV.
  float RightTan; ///< Tangent of the angle between the viewing vector and right edge of the FOV.
} ovrFovPort;
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** HMD Types\r
+\r
/// Enumerates all HMD types that we support.
///
/// The currently released developer kits are ovrHmd_DK1 and ovrHmd_DK2.
/// The other enumerations are for internal use only.
typedef enum ovrHmdType_ {
  // NOTE(review): values are explicitly pinned (with gaps for retired models);
  // presumably part of a stable ABI -- do not renumber.
  ovrHmd_None = 0,
  ovrHmd_DK1 = 3,
  ovrHmd_DKHD = 4,
  ovrHmd_DK2 = 6,
  ovrHmd_CB = 8,
  ovrHmd_Other = 9,
  ovrHmd_E3_2015 = 10,
  ovrHmd_ES06 = 11,
  ovrHmd_ES09 = 12,
  ovrHmd_ES11 = 13,
  ovrHmd_CV1 = 14,

  ovrHmd_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrHmdType;
+\r
/// HMD capability bits reported by device.
///
typedef enum ovrHmdCaps_ {
  // Read-only flags

  /// <B>(read only)</B> Specifies that the HMD is a virtual debug device.
  ovrHmdCap_DebugDevice = 0x0010,

  // No writable capability bits are currently defined in this header.

  ovrHmdCap_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrHmdCaps;
+\r
/// Tracking capability bits reported by the device.
/// Used with ovr_GetTrackingCaps.
typedef enum ovrTrackingCaps_ {
  // Bit flags: values may be OR'd together and tested as a mask.
  ovrTrackingCap_Orientation = 0x0010, ///< Supports orientation tracking (IMU).
  ovrTrackingCap_MagYawCorrection = 0x0020, ///< Supports yaw drift correction.
  ovrTrackingCap_Position = 0x0040, ///< Supports positional tracking.
  ovrTrackingCap_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrTrackingCaps;
+\r
/// Optional extensions
typedef enum ovrExtensions_ {
  // Extensions are identified by sequential indices, not bit flags.
  ovrExtension_TextureLayout_Octilinear = 0, ///< Enable before first layer submission.
  ovrExtension_Count, ///< \internal Sanity checking
  ovrExtension_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrExtensions;
+\r
/// Specifies which eye is being used for rendering.
/// This type explicitly does not include a third "NoStereo" monoscopic option,
/// as such is not required for an HMD-centered API.
typedef enum ovrEyeType_ {
  ovrEye_Left = 0, ///< The left eye, from the viewer's perspective.
  ovrEye_Right = 1, ///< The right eye, from the viewer's perspective.
  ovrEye_Count = 2, ///< \internal Count of enumerated elements; sizes per-eye arrays such as ovrHmdDesc::DefaultEyeFov.
  ovrEye_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrEyeType;
+\r
/// Specifies the coordinate system ovrTrackingState returns tracking poses in.
/// Used with ovr_SetTrackingOriginType()
typedef enum ovrTrackingOrigin_ {
  /// \brief Tracking system origin reported at eye (HMD) height
  /// \details Prefer using this origin when your application requires
  /// matching user's current physical head pose to a virtual head pose
  /// without any regard to the height of the floor. Cockpit-based,
  /// or 3rd-person experiences are ideal candidates.
  /// When used, all poses in ovrTrackingState are reported as an offset
  /// transform from the profile calibrated or recentered HMD pose.
  /// It is recommended that apps using this origin type call ovr_RecenterTrackingOrigin
  /// prior to starting the VR experience, but notify the user before doing so
  /// to make sure the user is in a comfortable pose, facing a comfortable
  /// direction.
  ovrTrackingOrigin_EyeLevel = 0,

  /// \brief Tracking system origin reported at floor height
  /// \details Prefer using this origin when your application requires the
  /// physical floor height to match the virtual floor height, such as
  /// standing experiences.
  /// When used, all poses in ovrTrackingState are reported as an offset
  /// transform from the profile calibrated floor pose. Calling ovr_RecenterTrackingOrigin
  /// will recenter the X & Z axes as well as yaw, but the Y-axis (i.e. height) will continue
  /// to be reported using the floor height as the origin for all poses.
  ovrTrackingOrigin_FloorLevel = 1,

  ovrTrackingOrigin_Count = 2, ///< \internal Count of enumerated elements.
  ovrTrackingOrigin_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
} ovrTrackingOrigin;
+\r
+/// Identifies a graphics device in a platform-specific way.
+/// For Windows this is a LUID type.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrGraphicsLuid_ {
+ // Public definition reserves space for graphics API-specific implementation
+ char Reserved[8];
+} ovrGraphicsLuid;
+
+/// This is a complete descriptor of the HMD.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrHmdDesc_ {
+ ovrHmdType Type; ///< The type of HMD.
+ OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal struct padding.
+ char ProductName[64]; ///< UTF8-encoded product identification string (e.g. "Oculus Rift DK1").
+ char Manufacturer[64]; ///< UTF8-encoded HMD manufacturer identification string.
+ short VendorId; ///< HID (USB) vendor identifier of the device.
+ short ProductId; ///< HID (USB) product identifier of the device.
+ char SerialNumber[24]; ///< HMD serial number.
+ short FirmwareMajor; ///< HMD firmware major version.
+ short FirmwareMinor; ///< HMD firmware minor version.
+ unsigned int AvailableHmdCaps; ///< Available ovrHmdCaps bits.
+ unsigned int DefaultHmdCaps; ///< Default ovrHmdCaps bits.
+ unsigned int AvailableTrackingCaps; ///< Available ovrTrackingCaps bits.
+ unsigned int DefaultTrackingCaps; ///< Default ovrTrackingCaps bits.
+ ovrFovPort DefaultEyeFov[ovrEye_Count]; ///< Defines the recommended FOVs for the HMD.
+ ovrFovPort MaxEyeFov[ovrEye_Count]; ///< Defines the maximum FOVs for the HMD.
+ ovrSizei Resolution; ///< Resolution of the full HMD screen (both eyes) in pixels.
+ float DisplayRefreshRate; ///< Refresh rate of the display in cycles per second.
+ OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad1, 4)) ///< \internal struct padding.
+} ovrHmdDesc;
+
+/// Used as an opaque pointer to an OVR session.
+/// The pointed-to struct is never defined publicly; sessions are created and
+/// destroyed only through the LibOVR API.
+typedef struct ovrHmdStruct* ovrSession;
+
+/// Platform-specific process identifier type (Win32 uses a 32-bit id; POSIX uses pid_t).
+#ifdef OVR_OS_WIN32
+typedef uint32_t ovrProcessId;
+#else
+typedef pid_t ovrProcessId;
+#endif
+\r
+/// Fallback definitions for when the vulkan header isn't being included
+#if !defined(VK_VERSION_1_0)
+// From <vulkan/vulkan.h>:
+// Dispatchable handles are always opaque struct pointers. Non-dispatchable
+// handles are struct pointers only on 64-bit targets; on 32-bit targets they
+// are plain 64-bit integers, matching the upstream Vulkan header exactly so
+// the two definitions are ABI-compatible.
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || \
+ defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || \
+ defined(__powerpc64__)
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T* object;
+#else
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+VK_DEFINE_HANDLE(VkDevice)
+VK_DEFINE_HANDLE(VkQueue)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+#endif
+\r
+/// Bit flags describing the current status of sensor tracking.
+/// The values must be the same as in enum StatusBits
+///
+/// \see ovrTrackingState
+///
+typedef enum ovrStatusBits_ {
+ ovrStatus_OrientationTracked = 0x0001, ///< Orientation is currently tracked (connected & in use).
+ ovrStatus_PositionTracked = 0x0002, ///< Position is currently tracked (false if out of range).
+ ovrStatus_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrStatusBits;
+
+/// Specifies the description of a single sensor.
+///
+/// \see ovr_GetTrackerDesc
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTrackerDesc_ {
+ float FrustumHFovInRadians; ///< Sensor frustum horizontal field-of-view (if present).
+ float FrustumVFovInRadians; ///< Sensor frustum vertical field-of-view (if present).
+ float FrustumNearZInMeters; ///< Sensor frustum near Z (if present).
+ float FrustumFarZInMeters; ///< Sensor frustum far Z (if present).
+} ovrTrackerDesc;
+
+/// Specifies sensor flags.
+///
+/// \see ovrTrackerPose
+///
+typedef enum ovrTrackerFlags_ {
+ /// The sensor is present, else the sensor is absent or offline.
+ ovrTracker_Connected = 0x0020,
+
+ /// The sensor has a valid pose, else the pose is unavailable.
+ /// This will only be set if ovrTracker_Connected is set.
+ ovrTracker_PoseTracked = 0x0004
+} ovrTrackerFlags;
+\r
+/// Specifies the pose for a single sensor.
+///
+/// \see ovr_GetTrackerPose, ovrTrackerFlags
+///
+// NOTE: tag renamed from "_ovrTrackerPose" to "ovrTrackerPose_": a leading
+// underscore at file scope is a reserved identifier (C11 7.1.3), and every
+// other tag in this header uses the trailing-underscore convention
+// (ovrTrackerDesc_, ovrTrackingState_, ...). The typedef name, field layout
+// and alignment are unchanged, so this is ABI- and source-compatible for all
+// callers that use the ovrTrackerPose typedef.
+typedef struct OVR_ALIGNAS(8) ovrTrackerPose_ {
+ /// ovrTrackerFlags.
+ unsigned int TrackerFlags;
+
+ /// The sensor's pose. This pose includes sensor tilt (roll and pitch).
+ /// For a leveled coordinate system use LeveledPose.
+ ovrPosef Pose;
+
+ /// The sensor's leveled pose, aligned with gravity. This value includes pos and yaw of the
+ /// sensor, but not roll and pitch. It can be used as a reference point to render real-world
+ /// objects in the correct location.
+ ovrPosef LeveledPose;
+
+ OVR_UNUSED_STRUCT_PAD(pad0, 4) ///< \internal struct pad.
+} ovrTrackerPose;
+\r
+/// Tracking state at a given absolute time (describes predicted HMD pose, etc.).
+/// Returned by ovr_GetTrackingState.
+///
+/// \see ovr_GetTrackingState
+///
+typedef struct OVR_ALIGNAS(8) ovrTrackingState_ {
+ /// Predicted head pose (and derivatives) at the requested absolute time.
+ ovrPoseStatef HeadPose;
+
+ /// HeadPose tracking status described by ovrStatusBits.
+ unsigned int StatusFlags;
+
+ /// The most recent calculated pose for each hand when hand controller tracking is present.
+ /// HandPoses[ovrHand_Left] refers to the left hand and HandPoses[ovrHand_Right] to the right.
+ /// These values can be combined with ovrInputState for complete hand controller information.
+ ovrPoseStatef HandPoses[2];
+
+ /// HandPoses status flags described by ovrStatusBits.
+ /// Only ovrStatus_OrientationTracked and ovrStatus_PositionTracked are reported.
+ /// Indexed by ovrHandType, matching HandPoses above.
+ unsigned int HandStatusFlags[2];
+
+ /// The pose of the origin captured during calibration.
+ /// Like all other poses here, this is expressed in the space set by ovr_RecenterTrackingOrigin,
+ /// or ovr_SpecifyTrackingOrigin and so will change every time either of those functions are
+ /// called. This pose can be used to calculate where the calibrated origin lands in the new
+ /// recentered space. If an application never calls ovr_RecenterTrackingOrigin or
+ /// ovr_SpecifyTrackingOrigin, expect this value to be the identity pose and as such will point
+ /// respective origin based on ovrTrackingOrigin requested when calling ovr_GetTrackingState.
+ ovrPosef CalibratedOrigin;
+
+} ovrTrackingState;
+\r
+\r
+\r
+/// Rendering information for each eye. Computed by ovr_GetRenderDesc() based on the
+/// specified FOV. Note that the rendering viewport is not included
+/// here as it can be specified separately and modified per frame by
+/// passing different Viewport values in the layer structure.
+///
+/// \see ovr_GetRenderDesc
+///
+typedef struct OVR_ALIGNAS(4) ovrEyeRenderDesc_ {
+ ovrEyeType Eye; ///< The eye index to which this instance corresponds.
+ ovrFovPort Fov; ///< The field of view.
+ ovrRecti DistortedViewport; ///< Distortion viewport.
+ ovrVector2f PixelsPerTanAngleAtCenter; ///< How many display pixels will fit in tan(angle) = 1.
+ ovrPosef HmdToEyePose; ///< Transform of eye from the HMD center, in meters.
+} ovrEyeRenderDesc;
+
+/// Projection information for ovrLayerEyeFovDepth.
+///
+/// Use the utility function ovrTimewarpProjectionDesc_FromProjection to
+/// generate this structure from the application's projection matrix.
+///
+/// \see ovrLayerEyeFovDepth, ovrTimewarpProjectionDesc_FromProjection
+///
+typedef struct OVR_ALIGNAS(4) ovrTimewarpProjectionDesc_ {
+ float Projection22; ///< Projection matrix element [2][2].
+ float Projection23; ///< Projection matrix element [2][3].
+ float Projection32; ///< Projection matrix element [3][2].
+} ovrTimewarpProjectionDesc;
+
+
+/// Contains the data necessary to properly calculate position info for various layer types.
+/// - HmdToEyePose is the same value-pair provided in ovrEyeRenderDesc. Modifying this value is
+/// suggested only if the app is forcing monoscopic rendering and requires that all layers
+/// including quad layers show up in a monoscopic fashion.
+/// - HmdSpaceToWorldScaleInMeters is used to scale player motion into in-application units.
+/// In other words, it is how big an in-application unit is in the player's physical meters.
+/// For example, if the application uses inches as its units then HmdSpaceToWorldScaleInMeters
+/// would be 0.0254.
+/// Note that if you are scaling the player in size, this must also scale. So if your application
+/// units are inches, but you're shrinking the player to half their normal size, then
+/// HmdSpaceToWorldScaleInMeters would be 0.0254*2.0.
+///
+/// \see ovrEyeRenderDesc, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(4) ovrViewScaleDesc_ {
+ ovrPosef HmdToEyePose[ovrEye_Count]; ///< Transform of each eye from the HMD center, in meters.
+ float HmdSpaceToWorldScaleInMeters; ///< Ratio of viewer units to meter units.
+} ovrViewScaleDesc;
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Platform-independent Rendering Configuration\r
+\r
+/// The type of texture resource.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureType_ {
+ ovrTexture_2D, ///< 2D textures.
+ ovrTexture_2D_External, ///< Application-provided 2D texture. Not supported on PC.
+ ovrTexture_Cube, ///< Cube maps. ovrTextureSwapChainDesc::ArraySize must be 6 for this type.
+ ovrTexture_Count, ///< \internal Count of enumerated elements.
+ ovrTexture_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTextureType;
+
+/// The bindings required for texture swap chain.
+///
+/// All texture swap chains are automatically bindable as shader
+/// input resources since the Oculus runtime needs this to read them.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureBindFlags_ {
+ ovrTextureBind_None,
+
+ /// The application can write into the chain with pixel shader.
+ ovrTextureBind_DX_RenderTarget = 0x0001,
+
+ /// The application can write to the chain with compute shader.
+ ovrTextureBind_DX_UnorderedAccess = 0x0002,
+
+ /// The chain buffers can be bound as depth and/or stencil buffers.
+ /// This flag cannot be combined with ovrTextureBind_DX_RenderTarget.
+ ovrTextureBind_DX_DepthStencil = 0x0004,
+
+ ovrTextureBind_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTextureBindFlags;
+\r
+/// The format of a texture.
+///
+/// Enumerator values are explicit and non-sequential: formats added in later
+/// SDK revisions take the next free value rather than being inserted in order
+/// (e.g. B8G8R8_UNORM = 27, R11G11B10_FLOAT = 25). Do not renumber.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureFormat_ {
+ OVR_FORMAT_UNKNOWN = 0,
+ OVR_FORMAT_B5G6R5_UNORM = 1, ///< Not currently supported on PC. Requires a DirectX 11.1 device.
+ OVR_FORMAT_B5G5R5A1_UNORM = 2, ///< Not currently supported on PC. Requires a DirectX 11.1 device.
+ OVR_FORMAT_B4G4R4A4_UNORM = 3, ///< Not currently supported on PC. Requires a DirectX 11.1 device.
+ OVR_FORMAT_R8G8B8A8_UNORM = 4,
+ OVR_FORMAT_R8G8B8A8_UNORM_SRGB = 5,
+ OVR_FORMAT_B8G8R8A8_UNORM = 6,
+ OVR_FORMAT_B8G8R8_UNORM = 27, ///< Value assigned out of sequence (added after the BC formats).
+ OVR_FORMAT_B8G8R8A8_UNORM_SRGB = 7, ///< Not supported for OpenGL applications
+ OVR_FORMAT_B8G8R8X8_UNORM = 8, ///< Not supported for OpenGL applications
+ OVR_FORMAT_B8G8R8X8_UNORM_SRGB = 9, ///< Not supported for OpenGL applications
+ OVR_FORMAT_R16G16B16A16_FLOAT = 10,
+ OVR_FORMAT_R11G11B10_FLOAT = 25, ///< Introduced in v1.10
+
+ // Depth formats
+ OVR_FORMAT_D16_UNORM = 11,
+ OVR_FORMAT_D24_UNORM_S8_UINT = 12,
+ OVR_FORMAT_D32_FLOAT = 13,
+ OVR_FORMAT_D32_FLOAT_S8X24_UINT = 14,
+
+ // Added in 1.5 compressed formats can be used for static layers
+ OVR_FORMAT_BC1_UNORM = 15,
+ OVR_FORMAT_BC1_UNORM_SRGB = 16,
+ OVR_FORMAT_BC2_UNORM = 17,
+ OVR_FORMAT_BC2_UNORM_SRGB = 18,
+ OVR_FORMAT_BC3_UNORM = 19,
+ OVR_FORMAT_BC3_UNORM_SRGB = 20,
+ OVR_FORMAT_BC6H_UF16 = 21,
+ OVR_FORMAT_BC6H_SF16 = 22,
+ OVR_FORMAT_BC7_UNORM = 23,
+ OVR_FORMAT_BC7_UNORM_SRGB = 24,
+
+
+ OVR_FORMAT_ENUMSIZE = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTextureFormat;
+\r
+/// Misc flags overriding particular
+/// behaviors of a texture swap chain
+///
+/// NOTE: the typedef name is ovrTextureFlags while the tag is
+/// ovrTextureMiscFlags_; ovrTextureSwapChainDesc::MiscFlags refers to this type.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureMiscFlags_ {
+ ovrTextureMisc_None,
+
+ /// Vulkan and DX only: The underlying texture is created with a TYPELESS equivalent
+ /// of the format specified in the texture desc. The SDK will still access the
+ /// texture using the format specified in the texture desc, but the app can
+ /// create views with different formats if this is specified.
+ ovrTextureMisc_DX_Typeless = 0x0001,
+
+ /// DX only: Allow generation of the mip chain on the GPU via the GenerateMips
+ /// call. This flag requires that RenderTarget binding also be specified.
+ ovrTextureMisc_AllowGenerateMips = 0x0002,
+
+ /// Texture swap chain contains protected content, and requires
+ /// HDCP connection in order to display to HMD. Also prevents
+ /// mirroring or other redirection of any frame containing this contents
+ ovrTextureMisc_ProtectedContent = 0x0004,
+
+ /// Automatically generate and use the mip chain in composition on each submission.
+ /// Mips are regenerated from highest quality level, ignoring other pre-existing mip levels.
+ /// Not supported for depth or compressed (BC) formats.
+ ovrTextureMisc_AutoGenerateMips = 0x0008,
+
+ ovrTextureMisc_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTextureFlags;
+
+/// Description used to create a texture swap chain.
+///
+/// \see ovr_CreateTextureSwapChainDX
+/// \see ovr_CreateTextureSwapChainGL
+///
+typedef struct ovrTextureSwapChainDesc_ {
+ ovrTextureType Type; ///< Must not be ovrTexture_Window
+ ovrTextureFormat Format; ///< Pixel format of the chain's textures.
+ int ArraySize; ///< Must be 6 for ovrTexture_Cube, 1 for other types.
+ int Width; ///< Texture width in pixels.
+ int Height; ///< Texture height in pixels.
+ int MipLevels; ///< Number of mip levels per texture.
+ int SampleCount; ///< Multisample count.
+ ovrBool StaticImage; ///< Not buffered in a chain. For images that don't change
+ OVR_ALIGNAS(4) unsigned int MiscFlags; ///< ovrTextureFlags
+ OVR_ALIGNAS(4) unsigned int BindFlags; ///< ovrTextureBindFlags. Not used for GL.
+} ovrTextureSwapChainDesc;
+\r
+/// Bit flags used as part of ovrMirrorTextureDesc's MirrorOptions field.
+///
+/// \see ovr_CreateMirrorTextureWithOptionsDX
+/// \see ovr_CreateMirrorTextureWithOptionsGL
+/// \see ovr_CreateMirrorTextureWithOptionsVk
+///
+typedef enum ovrMirrorOptions_ {
+ /// By default the mirror texture will be:
+ /// * Pre-distortion (i.e. rectilinear)
+ /// * Contain both eye textures
+ /// * Exclude Guardian, Notifications, System Menu GUI
+ ovrMirrorOption_Default = 0x0000,
+
+ /// Retrieves the barrel distorted texture contents instead of the rectilinear one
+ /// This is only recommended for debugging purposes, and not for final desktop presentation
+ ovrMirrorOption_PostDistortion = 0x0001,
+
+ /// Since ovrMirrorOption_Default renders both eyes into the mirror texture,
+ /// these two flags are exclusive (i.e. cannot use them simultaneously)
+ ovrMirrorOption_LeftEyeOnly = 0x0002,
+ ovrMirrorOption_RightEyeOnly = 0x0004,
+
+ /// Shows the boundary system aka Guardian on the mirror texture
+ ovrMirrorOption_IncludeGuardian = 0x0008,
+
+ /// Shows system notifications the user receives on the mirror texture
+ ovrMirrorOption_IncludeNotifications = 0x0010,
+
+ /// Shows the system menu (triggered by hitting the Home button) on the mirror texture
+ ovrMirrorOption_IncludeSystemGui = 0x0020,
+
+
+ ovrMirrorOption_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrMirrorOptions;
+
+/// Description used to create a mirror texture.
+///
+/// \see ovr_CreateMirrorTextureWithOptionsDX
+/// \see ovr_CreateMirrorTextureWithOptionsGL
+/// \see ovr_CreateMirrorTextureWithOptionsVk
+///
+typedef struct ovrMirrorTextureDesc_ {
+ ovrTextureFormat Format; ///< Pixel format of the mirror texture.
+ int Width; ///< Mirror texture width in pixels.
+ int Height; ///< Mirror texture height in pixels.
+ unsigned int MiscFlags; ///< ovrTextureFlags
+ unsigned int MirrorOptions; ///< ovrMirrorOptions
+} ovrMirrorTextureDesc;
+
+/// Opaque handles to runtime-owned swap chain / mirror texture objects.
+typedef struct ovrTextureSwapChainData* ovrTextureSwapChain;
+typedef struct ovrMirrorTextureData* ovrMirrorTexture;
+\r
+//-----------------------------------------------------------------------------------\r
+\r
+/// Describes button input types.
+/// Button inputs are combined; that is they will be reported as pressed if they are
+/// pressed on either one of the two devices.
+/// The ovrButton_Up/Down/Left/Right map to both XBox D-Pad and directional buttons.
+/// The ovrButton_Enter and ovrButton_Return map to Start and Back controller buttons, respectively.
+typedef enum ovrButton_ {
+ /// A button on XBox controllers and right Touch controller. Select button on Oculus Remote.
+ ovrButton_A = 0x00000001,
+
+ /// B button on XBox controllers and right Touch controller. Back button on Oculus Remote.
+ ovrButton_B = 0x00000002,
+
+ /// Right thumbstick on XBox controllers and Touch controllers. Not present on Oculus Remote.
+ ovrButton_RThumb = 0x00000004,
+
+ /// Right shoulder button on XBox controllers. Not present on Touch controllers or Oculus Remote.
+ ovrButton_RShoulder = 0x00000008,
+
+
+ /// X button on XBox controllers and left Touch controller. Not present on Oculus Remote.
+ ovrButton_X = 0x00000100,
+
+ /// Y button on XBox controllers and left Touch controller. Not present on Oculus Remote.
+ ovrButton_Y = 0x00000200,
+
+ /// Left thumbstick on XBox controllers and Touch controllers. Not present on Oculus Remote.
+ ovrButton_LThumb = 0x00000400,
+
+ /// Left shoulder button on XBox controllers. Not present on Touch controllers or Oculus Remote.
+ ovrButton_LShoulder = 0x00000800,
+
+ /// Up button on XBox controllers and Oculus Remote. Not present on Touch controllers.
+ ovrButton_Up = 0x00010000,
+
+ /// Down button on XBox controllers and Oculus Remote. Not present on Touch controllers.
+ ovrButton_Down = 0x00020000,
+
+ /// Left button on XBox controllers and Oculus Remote. Not present on Touch controllers.
+ ovrButton_Left = 0x00040000,
+
+ /// Right button on XBox controllers and Oculus Remote. Not present on Touch controllers.
+ ovrButton_Right = 0x00080000,
+
+ /// Start on XBox 360 controller. Menu on XBox One controller and Left Touch controller.
+ /// Should be referred to as the Menu button in user-facing documentation.
+ ovrButton_Enter = 0x00100000,
+
+ /// Back on Xbox 360 controller. View button on XBox One controller. Not present on Touch
+ /// controllers or Oculus Remote.
+ ovrButton_Back = 0x00200000,
+
+ /// Volume button on Oculus Remote. Not present on XBox or Touch controllers.
+ ovrButton_VolUp = 0x00400000,
+
+ /// Volume button on Oculus Remote. Not present on XBox or Touch controllers.
+ ovrButton_VolDown = 0x00800000,
+
+ /// Home button on XBox controllers. Oculus button on Touch controllers and Oculus Remote.
+ ovrButton_Home = 0x01000000,
+
+ // Convenience bit masks (deliberately non-doxygen; combinations, not buttons):
+ // Bit mask of all buttons that are for private usage by Oculus
+ ovrButton_Private = ovrButton_VolUp | ovrButton_VolDown | ovrButton_Home,
+
+ // Bit mask of all buttons on the right Touch controller
+ ovrButton_RMask = ovrButton_A | ovrButton_B | ovrButton_RThumb | ovrButton_RShoulder,
+
+ // Bit mask of all buttons on the left Touch controller
+ ovrButton_LMask =
+ ovrButton_X | ovrButton_Y | ovrButton_LThumb | ovrButton_LShoulder | ovrButton_Enter,
+
+ ovrButton_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrButton;
+\r
+/// Describes touch input types.
+/// These values map to capacitive touch values reported ovrInputState::Touch.
+/// Some of these values are mapped to button bits for consistency.
+typedef enum ovrTouch_ {
+ ovrTouch_A = ovrButton_A,
+ ovrTouch_B = ovrButton_B,
+ ovrTouch_RThumb = ovrButton_RThumb,
+ ovrTouch_RThumbRest = 0x00000008,
+ ovrTouch_RIndexTrigger = 0x00000010,
+
+ // Bit mask of all the button touches on the right controller
+ ovrTouch_RButtonMask =
+ ovrTouch_A | ovrTouch_B | ovrTouch_RThumb | ovrTouch_RThumbRest | ovrTouch_RIndexTrigger,
+
+ ovrTouch_X = ovrButton_X,
+ ovrTouch_Y = ovrButton_Y,
+ ovrTouch_LThumb = ovrButton_LThumb,
+ ovrTouch_LThumbRest = 0x00000800,
+ ovrTouch_LIndexTrigger = 0x00001000,
+
+ // Bit mask of all the button touches on the left controller
+ ovrTouch_LButtonMask =
+ ovrTouch_X | ovrTouch_Y | ovrTouch_LThumb | ovrTouch_LThumbRest | ovrTouch_LIndexTrigger,
+
+ // Finger pose state
+ // Derived internally based on distance, proximity to sensors and filtering.
+ // These are not physical touch sensors; they indicate an inferred hand gesture.
+ ovrTouch_RIndexPointing = 0x00000020,
+ ovrTouch_RThumbUp = 0x00000040,
+ ovrTouch_LIndexPointing = 0x00002000,
+ ovrTouch_LThumbUp = 0x00004000,
+
+ // Bit mask of all right controller poses
+ ovrTouch_RPoseMask = ovrTouch_RIndexPointing | ovrTouch_RThumbUp,
+
+ // Bit mask of all left controller poses
+ ovrTouch_LPoseMask = ovrTouch_LIndexPointing | ovrTouch_LThumbUp,
+
+ ovrTouch_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTouch;
+\r
+/// Describes the Touch Haptics engine.
+/// Currently, those values will NOT change during a session.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTouchHapticsDesc_ {
+ // Haptics engine frequency/sample-rate, sample time in seconds equals 1.0/sampleRateHz
+ int SampleRateHz;
+ // Size of each Haptics sample, sample value range is [0, 2^(Bytes*8)-1]
+ int SampleSizeInBytes;
+
+ // Queue size that would guarantee Haptics engine would not starve for data
+ // Make sure size doesn't drop below it for best results
+ int QueueMinSizeToAvoidStarvation;
+
+ // Minimum, Maximum and Optimal number of samples that can be sent to Haptics through
+ // ovr_SubmitControllerVibration
+ int SubmitMinSamples;
+ int SubmitMaxSamples;
+ int SubmitOptimalSamples;
+} ovrTouchHapticsDesc;
+\r
+/// Specifies which controller is connected; multiple can be connected at once.
+/// Values are bit flags and may be OR'd together (e.g. ovrControllerType_Touch
+/// is the combination of the left and right Touch controllers).
+typedef enum ovrControllerType_ {
+ ovrControllerType_None = 0x0000,
+ ovrControllerType_LTouch = 0x0001,
+ ovrControllerType_RTouch = 0x0002,
+ ovrControllerType_Touch = (ovrControllerType_LTouch | ovrControllerType_RTouch),
+ ovrControllerType_Remote = 0x0004,
+
+ ovrControllerType_XBox = 0x0010,
+
+ ovrControllerType_Object0 = 0x0100,
+ ovrControllerType_Object1 = 0x0200,
+ ovrControllerType_Object2 = 0x0400,
+ ovrControllerType_Object3 = 0x0800,
+
+ ovrControllerType_Active = 0xffffffff, ///< Operate on or query whichever controller is active.
+
+ ovrControllerType_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrControllerType;
+
+/// Haptics buffer submit mode
+typedef enum ovrHapticsBufferSubmitMode_ {
+ /// Enqueue buffer for later playback
+ ovrHapticsBufferSubmit_Enqueue
+} ovrHapticsBufferSubmitMode;
+\r
+/// Maximum number of samples in ovrHapticsBuffer
+#define OVR_HAPTICS_BUFFER_SAMPLES_MAX 256\
+
+/// Haptics buffer descriptor, contains amplitude samples used for Touch vibration
+typedef struct ovrHapticsBuffer_ {
+ /// Samples stored in opaque format
+ /// (sample size is given by ovrTouchHapticsDesc::SampleSizeInBytes).
+ const void* Samples;
+ /// Number of samples (up to OVR_HAPTICS_BUFFER_SAMPLES_MAX)
+ int SamplesCount;
+ /// How samples are submitted to the hardware
+ ovrHapticsBufferSubmitMode SubmitMode;
+} ovrHapticsBuffer;
+
+/// State of the Haptics playback for Touch vibration
+typedef struct ovrHapticsPlaybackState_ {
+ // Remaining space available to queue more samples
+ int RemainingQueueSpace;
+
+ // Number of samples currently queued
+ int SamplesQueued;
+} ovrHapticsPlaybackState;
+\r
+/// Position tracked devices
+/// Values are bit flags and may be OR'd together (see ovrTrackedDevice_Touch
+/// and ovrTrackedDevice_All).
+typedef enum ovrTrackedDeviceType_ {
+ ovrTrackedDevice_None = 0x0000,
+ ovrTrackedDevice_HMD = 0x0001,
+ ovrTrackedDevice_LTouch = 0x0002,
+ ovrTrackedDevice_RTouch = 0x0004,
+ ovrTrackedDevice_Touch = (ovrTrackedDevice_LTouch | ovrTrackedDevice_RTouch),
+
+ ovrTrackedDevice_Object0 = 0x0010,
+ ovrTrackedDevice_Object1 = 0x0020,
+ ovrTrackedDevice_Object2 = 0x0040,
+ ovrTrackedDevice_Object3 = 0x0080,
+
+ ovrTrackedDevice_All = 0xFFFF,
+} ovrTrackedDeviceType;
+
+/// Boundary types that specified while using the boundary system
+typedef enum ovrBoundaryType_ {
+ /// Outer boundary - closely represents user setup walls
+ ovrBoundary_Outer = 0x0001,
+
+ /// Play area - safe rectangular area inside outer boundary which can optionally be used to
+ /// restrict user interactions and motion.
+ ovrBoundary_PlayArea = 0x0100,
+} ovrBoundaryType;
+
+/// Boundary system look and feel
+typedef struct ovrBoundaryLookAndFeel_ {
+ /// Boundary color (alpha channel is ignored)
+ ovrColorf Color;
+} ovrBoundaryLookAndFeel;
+
+/// Provides boundary test information
+typedef struct ovrBoundaryTestResult_ {
+ /// True if the boundary system is being triggered. Note that due to fade in/out effects this may
+ /// not exactly match visibility.
+ ovrBool IsTriggering;
+
+ /// Distance to the closest play area or outer boundary surface.
+ float ClosestDistance;
+
+ /// Closest point on the boundary surface.
+ ovrVector3f ClosestPoint;
+
+ /// Unit surface normal of the closest boundary surface.
+ ovrVector3f ClosestPointNormal;
+} ovrBoundaryTestResult;
+
+/// Provides names for the left and right hand array indexes.
+///
+/// \see ovrInputState, ovrTrackingState
+///
+typedef enum ovrHandType_ {
+ ovrHand_Left = 0,
+ ovrHand_Right = 1,
+ ovrHand_Count = 2, ///< \internal Count of enumerated elements; dimensions per-hand arrays.
+ ovrHand_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHandType;
+\r
+/// ovrInputState describes the complete controller input state, including Oculus Touch,
+/// and XBox gamepad. If multiple inputs are connected and used at the same time,
+/// their inputs are combined.
+typedef struct ovrInputState_ {
+ /// System time when the controller state was last updated.
+ double TimeInSeconds;
+
+ /// Values for buttons described by ovrButton.
+ unsigned int Buttons;
+
+ /// Touch values for buttons and sensors as described by ovrTouch.
+ unsigned int Touches;
+
+ /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f.
+ /// Returns 0 if the value would otherwise be less than 0.1176, for ovrControllerType_XBox.
+ /// This has been formally named simply "Trigger". We retain the name IndexTrigger for backwards
+ /// code compatibility.
+ /// User-facing documentation should refer to it as the Trigger.
+ float IndexTrigger[ovrHand_Count];
+
+ /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+ /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code
+ /// compatibility.
+ /// User-facing documentation should refer to it as the Grip Button or simply Grip.
+ float HandTrigger[ovrHand_Count];
+
+ /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range
+ /// of -1.0f to 1.0f.
+ /// Returns a deadzone (value 0) per each axis if the value on that axis would otherwise have been
+ /// between -.2746 to +.2746, for ovrControllerType_XBox
+ ovrVector2f Thumbstick[ovrHand_Count];
+
+ /// The type of the controller this state is for.
+ ovrControllerType ControllerType;
+
+ /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f.
+ /// Does not apply a deadzone. Only touch applies a filter.
+ /// This has been formally named simply "Trigger". We retain the name IndexTrigger for backwards
+ /// code compatibility.
+ /// User-facing documentation should refer to it as the Trigger.
+ float IndexTriggerNoDeadzone[ovrHand_Count];
+
+ /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+ /// Does not apply a deadzone. Only touch applies a filter.
+ /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code
+ /// compatibility.
+ /// User-facing documentation should refer to it as the Grip Button or simply Grip.
+ float HandTriggerNoDeadzone[ovrHand_Count];
+
+ /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range
+ /// -1.0f to 1.0f
+ /// Does not apply a deadzone or filter.
+ ovrVector2f ThumbstickNoDeadzone[ovrHand_Count];
+
+ /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f.
+ /// No deadzone or filter
+ /// This has been formally named simply "Trigger". We retain the name IndexTrigger for backwards
+ /// code compatibility.
+ /// User-facing documentation should refer to it as the Trigger.
+ float IndexTriggerRaw[ovrHand_Count];
+
+ /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+ /// No deadzone or filter
+ /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code
+ /// compatibility.
+ /// User-facing documentation should refer to it as the Grip Button or simply Grip.
+ float HandTriggerRaw[ovrHand_Count];
+
+ /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range
+ /// -1.0f to 1.0f
+ /// No deadzone or filter
+ ovrVector2f ThumbstickRaw[ovrHand_Count];
+} ovrInputState;
+\r
+/// Intrinsic (lens/sensor) parameters of an external camera.
+typedef struct ovrCameraIntrinsics_ {
+ /// Time in seconds from last change to the parameters
+ double LastChangedTime;
+
+ /// Angles of all 4 sides of viewport
+ ovrFovPort FOVPort;
+
+ /// Near plane of the virtual camera used to match the external camera
+ float VirtualNearPlaneDistanceMeters;
+
+ /// Far plane of the virtual camera used to match the external camera
+ float VirtualFarPlaneDistanceMeters;
+
+ /// Height in pixels of image sensor
+ ovrSizei ImageSensorPixelResolution;
+
+ /// The lens distortion matrix of camera
+ ovrMatrix4f LensDistortionMatrix;
+
+ /// How often, in seconds, the exposure is taken
+ double ExposurePeriodSeconds;
+
+ /// length of the exposure time
+ double ExposureDurationSeconds;
+
+} ovrCameraIntrinsics;
+
+/// Status bits for an external camera; see ovrCameraExtrinsics::CameraStatusFlags.
+typedef enum ovrCameraStatusFlags_ {
+ /// Initial state of camera
+ ovrCameraStatus_None = 0x0,
+
+ /// Bit set when the camera is connected to the system
+ ovrCameraStatus_Connected = 0x1,
+
+ /// Bit set when the camera is undergoing calibration
+ ovrCameraStatus_Calibrating = 0x2,
+
+ /// Bit set when the camera has tried & failed calibration
+ ovrCameraStatus_CalibrationFailed = 0x4,
+
+ /// Bit set when the camera has tried & passed calibration
+ ovrCameraStatus_Calibrated = 0x8,
+
+ /// Bit set when the camera is capturing
+ ovrCameraStatus_Capturing = 0x10,
+
+ ovrCameraStatus_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrCameraStatusFlags;
+\r
+/// Extrinsic parameters of an external camera: pose, attachment, status and
+/// exposure timing.
+///
+/// \see ovrExternalCamera, ovrCameraIntrinsics, ovrCameraStatusFlags
+typedef struct ovrCameraExtrinsics_ {
+ /// Time in seconds from last change to the parameters.
+ /// For instance, if the pose changes, or a camera exposure happens, this struct will be updated.
+ double LastChangedTimeSeconds;
+
+ /// Current Status of the camera, a mix of bits from ovrCameraStatusFlags
+ unsigned int CameraStatusFlags;
+
+ /// Which Tracked device, if any, is the camera rigidly attached to
+ /// If set to ovrTrackedDevice_None, then the camera is not attached to a tracked object.
+ /// If the external camera moves while unattached (i.e. set to ovrTrackedDevice_None), its Pose
+ /// won't be updated
+ ovrTrackedDeviceType AttachedToDevice;
+
+ /// The relative Pose of the External Camera.
+ /// If AttachedToDevice is ovrTrackedDevice_None, then this is an absolute pose in tracking space
+ ovrPosef RelativePose;
+
+ /// The time, in seconds, when the last successful exposure was taken
+ double LastExposureTimeSeconds;
+
+ /// Estimated exposure latency to get from the exposure time to the system
+ double ExposureLatencySeconds;
+
+ /// Additional latency to get from the exposure time of the real camera to match the render time
+ /// of the virtual camera
+ double AdditionalLatencySeconds;
+
+} ovrCameraExtrinsics;
+/// Maximum number of external cameras the API enumerates.
+#define OVR_MAX_EXTERNAL_CAMERA_COUNT 16
+/// Size in bytes of an external camera's Name buffer
+/// (presumably includes the terminating NUL — confirm against runtime behavior).
+#define OVR_EXTERNAL_CAMERA_NAME_SIZE 32
+/// Description of one external camera: an identifier string plus its intrinsic
+/// and extrinsic parameters.
+typedef struct ovrExternalCamera_ {
+ char Name[OVR_EXTERNAL_CAMERA_NAME_SIZE]; // camera identifier: vid + pid + serial number etc.
+ ovrCameraIntrinsics Intrinsics; ///< Optical/sensor parameters. \see ovrCameraIntrinsics
+ ovrCameraExtrinsics Extrinsics; ///< Pose/status/timing parameters. \see ovrCameraExtrinsics
+} ovrExternalCamera;
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Initialize structures\r
+\r
+/// Initialization flags.
+///
+/// \see ovrInitParams, ovr_Initialize
+///
+typedef enum ovrInitFlags_ {
+ /// When a debug library is requested, a slower debugging version of the library will
+ /// run which can be used to help solve problems in the library and debug application code.
+ ovrInit_Debug = 0x00000001,
+
+ // NOTE(review): bit 0x00000002 is unassigned here — presumably reserved or retired;
+ // confirm against SDK history before reusing it.
+
+ /// When a version is requested, the LibOVR runtime respects the RequestedMinorVersion
+ /// field and verifies that the RequestedMinorVersion is supported. Normally when you
+ /// specify this flag you simply use OVR_MINOR_VERSION for ovrInitParams::RequestedMinorVersion,
+ /// though you could use a lower version than OVR_MINOR_VERSION to specify previous
+ /// version behavior.
+ ovrInit_RequestVersion = 0x00000004,
+
+ // NOTE(review): bit 0x00000008 is likewise unassigned — presumably reserved; confirm.
+
+ /// This client will not be visible in the HMD.
+ /// Typically set by diagnostic or debugging utilities.
+ ovrInit_Invisible = 0x00000010,
+
+ /// This client will alternate between VR and 2D rendering.
+ /// Typically set by game engine editors and VR-enabled web browsers.
+ ovrInit_MixedRendering = 0x00000020,
+
+ /// This client is aware of ovrSessionStatus focus states (e.g. ovrSessionStatus::HasInputFocus),
+ /// and responds to them appropriately (e.g. pauses and stops drawing hands when lacking focus).
+ ovrInit_FocusAware = 0x00000040,
+
+ /// These bits are writable by user code.
+ /// NOTE(review): the lowercase 'i' in 'ovrinit_' breaks the ovrInit_ naming convention,
+ /// but this is the shipped identifier; renaming it would break existing callers.
+ ovrinit_WritableBits = 0x00ffffff,
+
+ ovrInit_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrInitFlags;
+\r
+/// Logging levels
+///
+/// These constants are passed to ovrLogCallback as its (plain int) level argument.
+///
+/// \see ovrInitParams, ovrLogCallback
+///
+typedef enum ovrLogLevel_ {
+ ovrLogLevel_Debug = 0, ///< Debug-level log event.
+ ovrLogLevel_Info = 1, ///< Info-level log event.
+ ovrLogLevel_Error = 2, ///< Error-level log event.
+
+ ovrLogLevel_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrLogLevel;
+\r
+/// Signature of the logging callback function pointer type.
+///
+/// The callback may be invoked asynchronously from multiple threads until
+/// ovr_Shutdown completes (see ovrInitParams::LogCallback), so implementations
+/// must be thread-safe.
+///
+/// \param[in] userData is an arbitrary value specified by the user of ovrInitParams.
+/// \param[in] level is one of the ovrLogLevel constants.
+/// \param[in] message is a UTF8-encoded null-terminated string.
+/// \see ovrInitParams, ovrLogLevel, ovr_Initialize
+///
+typedef void(OVR_CDECL* ovrLogCallback)(uintptr_t userData, int level, const char* message);
+\r
+/// Parameters for ovr_Initialize.
+///
+/// The struct is 8-byte aligned (OVR_ALIGNAS(8)) so its layout is stable across
+/// the CAPI shim / LibOVRRT boundary.
+///
+/// \see ovr_Initialize
+///
+typedef struct OVR_ALIGNAS(8) ovrInitParams_ {
+ /// Flags from ovrInitFlags to override default behavior.
+ /// Use 0 for the defaults.
+ uint32_t Flags;
+
+ /// Requests a specific minor version of the LibOVR runtime.
+ /// Flags must include ovrInit_RequestVersion or this will be ignored and OVR_MINOR_VERSION
+ /// will be used. If you are directly calling the LibOVRRT version of ovr_Initialize
+ /// in the LibOVRRT DLL then this must be valid and include ovrInit_RequestVersion.
+ uint32_t RequestedMinorVersion;
+
+ /// User-supplied log callback function, which may be called at any time
+ /// asynchronously from multiple threads until ovr_Shutdown completes.
+ /// Use NULL to specify no log callback.
+ ovrLogCallback LogCallback;
+
+ /// User-supplied data which is passed as-is to LogCallback. Typically this
+ /// is used to store an application-specific pointer which is read in the
+ /// callback function.
+ uintptr_t UserData;
+
+ /// Relative number of milliseconds to wait for a connection to the server
+ /// before failing. Use 0 for the default timeout.
+ uint32_t ConnectionTimeoutMS;
+
+ // 4-byte tail pad on 64-bit builds (macro name indicates it is unused padding),
+ // presumably to keep sizeof identical across compilers — confirm.
+ OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal
+
+} ovrInitParams;
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+// -----------------------------------------------------------------------------------\r
+// ***** API Interfaces\r
+\r
+/// Initializes LibOVR\r
+///\r
+/// Initialize LibOVR for application usage. This includes finding and loading the LibOVRRT\r
+/// shared library. No LibOVR API functions, other than ovr_GetLastErrorInfo and ovr_Detect, can\r
+/// be called unless ovr_Initialize succeeds. A successful call to ovr_Initialize must be eventually\r
+/// followed by a call to ovr_Shutdown. ovr_Initialize calls are idempotent.\r
+/// Calling ovr_Initialize twice does not require two matching calls to ovr_Shutdown.\r
+/// If already initialized, the return value is ovr_Success.\r
+///\r
+/// LibOVRRT shared library search order:\r
+/// -# Current working directory (often the same as the application directory).\r
+/// -# Module directory (usually the same as the application directory,\r
+/// but not if the module is a separate shared library).\r
+/// -# Application directory\r
+/// -# Development directory (only if OVR_ENABLE_DEVELOPER_SEARCH is enabled,\r
+/// which is off by default).\r
+/// -# Standard OS shared library search location(s) (OS-specific).\r
+///\r
+/// \param params Specifies custom initialization options. May be NULL to indicate default options\r
+/// when using the CAPI shim. If you are directly calling the LibOVRRT version of\r
+/// ovr_Initialize in the LibOVRRT DLL then this must be valid and\r
+/// include ovrInit_RequestVersion.\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information. Example failed results include:\r
+/// - ovrError_Initialize: Generic initialization error.\r
+/// - ovrError_LibLoad: Couldn't load LibOVRRT.\r
+/// - ovrError_LibVersion: LibOVRRT version incompatibility.\r
+/// - ovrError_ServiceConnection: Couldn't connect to the OVR Service.\r
+/// - ovrError_ServiceVersion: OVR Service version incompatibility.\r
+/// - ovrError_IncompatibleOS: The operating system version is incompatible.\r
+/// - ovrError_DisplayInit: Unable to initialize the HMD display.\r
+/// - ovrError_ServerStart: Unable to start the server. Is it already running?\r
+/// - ovrError_Reinitialization: Attempted to re-initialize with a different version.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrInitParams initParams = { ovrInit_RequestVersion, OVR_MINOR_VERSION, NULL, 0, 0 };\r
+/// ovrResult result = ovr_Initialize(&initParams);\r
+/// if(OVR_FAILURE(result)) {\r
+/// ovrErrorInfo errorInfo;\r
+/// ovr_GetLastErrorInfo(&errorInfo);\r
+/// DebugLog("ovr_Initialize failed: %s", errorInfo.ErrorString);\r
+/// return false;\r
+/// }\r
+/// [...]\r
+/// \endcode\r
+///\r
+/// \see ovr_Shutdown\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* params);\r
+\r
+/// Shuts down LibOVR
+///
+/// A successful call to ovr_Initialize must be eventually matched by a call to ovr_Shutdown.
+/// After calling ovr_Shutdown, no LibOVR functions can be called except ovr_GetLastErrorInfo
+/// or another ovr_Initialize. ovr_Shutdown invalidates all pointers, references, and created
+/// objects
+/// previously returned by LibOVR functions. The LibOVRRT shared library can be unloaded by
+/// ovr_Shutdown.
+///
+/// \see ovr_Initialize
+///
+// Fix: '(void)' declares a proper prototype in C; bare '()' is an old-style
+// declarator with unchecked arguments (identical meaning in C++, so ABI-safe).
+OVR_PUBLIC_FUNCTION(void) ovr_Shutdown(void);
+\r
+/// Returns information about the most recent failed return value by the\r
+/// current thread for this library.\r
+///\r
+/// This function itself can never generate an error.\r
+/// The last error is never cleared by LibOVR, but will be overwritten by new errors.\r
+/// Do not use this call to determine if there was an error in the last API\r
+/// call as successful API calls don't clear the last ovrErrorInfo.\r
+/// To avoid any inconsistency, ovr_GetLastErrorInfo should be called immediately\r
+/// after an API function that returned a failed ovrResult, with no other API\r
+/// functions called in the interim.\r
+///\r
+/// \param[out] errorInfo The last ovrErrorInfo for the current thread.\r
+///\r
+/// \see ovrErrorInfo\r
+///\r
+OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo);\r
+\r
+/// Returns the version string representing the LibOVRRT version.
+///
+/// The returned string pointer is valid until the next call to ovr_Shutdown.
+///
+/// Note that the returned version string doesn't necessarily match the current
+/// OVR_MAJOR_VERSION, etc., as the returned string refers to the LibOVRRT shared
+/// library version and not the locally compiled interface version.
+///
+/// The format of this string is subject to change in future versions and its contents
+/// should not be interpreted.
+///
+/// \return Returns a UTF8-encoded null-terminated version string.
+///
+// Fix: '(void)' declares a proper prototype in C; bare '()' is an old-style
+// declarator with unchecked arguments (identical meaning in C++, so ABI-safe).
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString(void);
+\r
+/// Writes a message string to the LibOVR tracing mechanism (if enabled).\r
+///\r
+/// This message will be passed back to the application via the ovrLogCallback if\r
+/// it was registered.\r
+///\r
+/// \param[in] level One of the ovrLogLevel constants.\r
+/// \param[in] message A UTF8-encoded null-terminated string.\r
+/// \return returns the strlen of the message or a negative value if the message is too large.\r
+///\r
+/// \see ovrLogLevel, ovrLogCallback\r
+///\r
+OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message);\r
+\r
+/// Identify client application info.\r
+///\r
+/// The string is one or more newline-delimited lines of optional info\r
+/// indicating engine name, engine version, engine plugin name, engine plugin\r
+/// version, engine editor. The order of the lines is not relevant. Individual\r
+/// lines are optional. A newline is not necessary at the end of the last line.\r
+/// Call after ovr_Initialize and before the first call to ovr_Create.\r
+/// Each value is limited to 20 characters. Key names such as 'EngineName:'\r
+/// 'EngineVersion:' do not count towards this limit.\r
+///\r
+/// \param[in] identity Specifies one or more newline-delimited lines of optional info:\r
+/// EngineName: %s\n\r
+/// EngineVersion: %s\n\r
+/// EnginePluginName: %s\n\r
+/// EnginePluginVersion: %s\n\r
+/// EngineEditor: <boolean> ('true' or 'false')\n\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovr_IdentifyClient("EngineName: Unity\n"\r
+/// "EngineVersion: 5.3.3\n"\r
+/// "EnginePluginName: OVRPlugin\n"\r
+/// "EnginePluginVersion: 1.2.0\n"\r
+/// "EngineEditor: true");\r
+/// \endcode\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_IdentifyClient(const char* identity);\r
+\r
+//-------------------------------------------------------------------------------------\r
+/// @name HMD Management\r
+///\r
+/// Handles the enumeration, creation, destruction, and properties of an HMD (head-mounted display).\r
+///@{\r
+\r
+/// Returns information about the current HMD.\r
+///\r
+/// ovr_Initialize must be called prior to calling this function,\r
+/// otherwise ovrHmdDesc::Type will be set to ovrHmd_None without\r
+/// checking for the HMD presence.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create() or NULL.\r
+///\r
+/// \return Returns an ovrHmdDesc. If invoked with NULL session argument, ovrHmdDesc::Type\r
+/// set to ovrHmd_None indicates that the HMD is not connected.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session);\r
+\r
+/// Returns the number of attached trackers.\r
+///\r
+/// The number of trackers may change at any time, so this function should be called before use\r
+/// as opposed to once on startup.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \return Returns unsigned int count.\r
+///\r
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session);\r
+\r
+/// Returns a given attached tracker description.\r
+///\r
+/// ovr_Initialize must have first been called in order for this to succeed, otherwise the returned\r
+/// trackerDescArray will be zero-initialized. The data returned by this function can change at\r
+/// runtime.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \param[in] trackerDescIndex Specifies a tracker index. The valid indexes are in the\r
+/// range of 0 to the tracker count returned by ovr_GetTrackerCount.\r
+///\r
+/// \return Returns ovrTrackerDesc. An empty ovrTrackerDesc will be returned if\r
+/// trackerDescIndex is out of range.\r
+///\r
+/// \see ovrTrackerDesc, ovr_GetTrackerCount\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrTrackerDesc)\r
+ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex);\r
+\r
+/// Creates a handle to a VR session.\r
+///\r
+/// Upon success the returned ovrSession must be eventually freed with ovr_Destroy when it is no\r
+/// longer needed.\r
+/// A second call to ovr_Create will result in an error return value if the previous session has not\r
+/// been destroyed.\r
+///\r
+/// \param[out] pSession Provides a pointer to an ovrSession which will be written to upon success.\r
+/// \param[out] pLuid Provides a system specific graphics adapter identifier that locates which\r
+/// graphics adapter has the HMD attached. This must match the adapter used by the application\r
+/// or no rendering output will be possible. This is important for stability on multi-adapter\r
+/// systems. An\r
+/// application that simply chooses the default adapter will not run reliably on multi-adapter\r
+/// systems.\r
+/// \return Returns an ovrResult indicating success or failure. Upon failure\r
+/// the returned ovrSession will be NULL.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrSession session;\r
+/// ovrGraphicsLuid luid;\r
+/// ovrResult result = ovr_Create(&session, &luid);\r
+/// if(OVR_FAILURE(result))\r
+/// ...\r
+/// \endcode\r
+///\r
+/// \see ovr_Destroy\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid);\r
+\r
+/// Destroys the session.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \see ovr_Create\r
+///\r
+OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session);\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Specifies status information for the current session.\r
+///\r
+/// \see ovr_GetSessionStatus\r
+///\r
+typedef struct ovrSessionStatus_ {\r
+ /// True if the process has VR focus and thus is visible in the HMD.\r
+ ovrBool IsVisible;\r
+\r
+ /// True if an HMD is present.\r
+ ovrBool HmdPresent;\r
+\r
+ /// True if the HMD is on the user's head.\r
+ ovrBool HmdMounted;\r
+\r
+ /// True if the session is in a display-lost state. See ovr_SubmitFrame.\r
+ ovrBool DisplayLost;\r
+\r
+ /// True if the application should initiate shutdown.\r
+ ovrBool ShouldQuit;\r
+\r
+ /// True if UX has requested re-centering. Must call ovr_ClearShouldRecenterFlag,\r
+ /// ovr_RecenterTrackingOrigin or ovr_SpecifyTrackingOrigin.\r
+ ovrBool ShouldRecenter;\r
+\r
+ /// True if the application is the foreground application and receives input (e.g. Touch\r
+ /// controller state). If this is false then the application is in the background (but possibly\r
+ /// still visible) should hide any input representations such as hands.\r
+ ovrBool HasInputFocus;\r
+\r
+ /// True if a system overlay is present, such as a dashboard. In this case the application\r
+ /// (if visible) should pause while still drawing, avoid drawing near-field graphics so they\r
+ /// don't visually fight with the system overlay, and consume fewer CPU and GPU resources.\r
+ ovrBool OverlayPresent;\r
+\r
+ /// True if runtime is requesting that the application provide depth buffers with projection\r
+ /// layers.\r
+ ovrBool DepthRequested;\r
+\r
+} ovrSessionStatus;\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Returns status information for the application.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[out] sessionStatus Provides an ovrSessionStatus that is filled in.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of\r
+/// failure, use ovr_GetLastErrorInfo to get more information.\r
+/// Return values include but aren't limited to:\r
+/// - ovrSuccess: Completed successfully.\r
+/// - ovrError_ServiceConnection: The service connection was lost and the application\r
+/// must destroy the session.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus);\r
+\r
+\r
+/// Query extension support status.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] extension Extension to query.\r
+/// \param[out] outExtensionSupported Set to extension support status. ovrTrue if supported.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of\r
+/// failure use ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \see ovrExtensions\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_IsExtensionSupported(\r
+ ovrSession session,\r
+ ovrExtensions extension,\r
+ ovrBool* outExtensionSupported);\r
+\r
+/// Enable extension. Extensions must be enabled after ovr_Create is called.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] extension Extension to enable.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. Extension is only\r
+/// enabled if successful. In the case of failure use ovr_GetLastErrorInfo\r
+/// to get more information.\r
+///\r
+/// \see ovrExtensions\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_EnableExtension(ovrSession session, ovrExtensions extension);\r
+\r
+//@}\r
+\r
+//-------------------------------------------------------------------------------------\r
+/// @name Tracking\r
+///\r
+/// Tracking functions handle the position, orientation, and movement of the HMD in space.\r
+///\r
+/// All tracking interface functions are thread-safe, allowing tracking state to be sampled\r
+/// from different threads.\r
+///\r
+///@{\r
+\r
+\r
+/// Sets the tracking origin type\r
+///\r
+/// When the tracking origin is changed, all of the calls that either provide\r
+/// or accept ovrPosef will use the new tracking origin provided.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] origin Specifies an ovrTrackingOrigin to be used for all ovrPosef\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \see ovrTrackingOrigin, ovr_GetTrackingOriginType\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin);\r
+\r
+/// Gets the tracking origin state\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \return Returns the ovrTrackingOrigin that was either set by default, or previous set by the\r
+/// application.\r
+///\r
+/// \see ovrTrackingOrigin, ovr_SetTrackingOriginType\r
+OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session);\r
+\r
+/// Re-centers the sensor position and orientation.\r
+///\r
+/// This resets the (x,y,z) positional components and the yaw orientation component of the\r
+/// tracking space for the HMD and controllers using the HMD's current tracking pose.\r
+/// If the caller requires some tweaks on top of the HMD's current tracking pose, consider using\r
+/// ovr_SpecifyTrackingOrigin instead.\r
+///\r
+/// The roll and pitch orientation components are always determined by gravity and cannot\r
+/// be redefined. All future tracking will report values relative to this new reference position.\r
+/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after\r
+/// this, because the sensor position(s) will change as a result of this.\r
+///\r
+/// The headset cannot be facing vertically upward or downward but rather must be roughly\r
+/// level otherwise this function will fail with ovrError_InvalidHeadsetOrientation.\r
+///\r
+/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how\r
+/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information. Return values include but aren't limited\r
+/// to:\r
+/// - ovrSuccess: Completed successfully.\r
+/// - ovrError_InvalidHeadsetOrientation: The headset was facing an invalid direction when\r
+/// attempting recentering, such as facing vertically.\r
+///\r
+/// \see ovrTrackingOrigin, ovr_GetTrackerPose, ovr_SpecifyTrackingOrigin\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session);\r
+\r
+/// Allows manually tweaking the sensor position and orientation.\r
+///\r
+/// This function is similar to ovr_RecenterTrackingOrigin in that it modifies the\r
+/// (x,y,z) positional components and the yaw orientation component of the tracking space for\r
+/// the HMD and controllers.\r
+///\r
+/// While ovr_RecenterTrackingOrigin resets the tracking origin in reference to the HMD's\r
+/// current pose, ovr_SpecifyTrackingOrigin allows the caller to explicitly specify a transform\r
+/// for the tracking origin. This transform is expected to be an offset to the most recent\r
+/// recentered origin, so calling this function repeatedly with the same originPose will keep\r
+/// nudging the recentered origin in that direction.\r
+///\r
+/// There are several use cases for this function. For example, if the application decides to\r
+/// limit the yaw, or translation of the recentered pose instead of directly using the HMD pose\r
+/// the application can query the current tracking state via ovr_GetTrackingState, and apply\r
+/// some limitations to the HMD pose before feeding this pose back into this function.
+/// Similarly, this can be used to "adjust the seating position" incrementally in apps that\r
+/// feature seated experiences such as cockpit-based games.\r
+///\r
+/// This function can emulate ovr_RecenterTrackingOrigin as such:\r
+/// ovrTrackingState ts = ovr_GetTrackingState(session, 0.0, ovrFalse);\r
+/// ovr_SpecifyTrackingOrigin(session, ts.HeadPose.ThePose);\r
+///\r
+/// The roll and pitch orientation components are determined by gravity and cannot be redefined.\r
+/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after\r
+/// this, because the sensor position(s) will change as a result of this.\r
+///\r
+/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how\r
+/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] originPose Specifies a pose that will be used to transform the current tracking\r
+/// origin.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information. Return values include but aren't limited\r
+/// to:\r
+/// - ovrSuccess: Completed successfully.\r
+/// - ovrError_InvalidParameter: The heading direction in originPose was invalid,\r
+/// such as facing vertically. This can happen if the caller is directly feeding the pose\r
+/// of a position-tracked device such as an HMD or controller into this function.\r
+///\r
+/// \see ovrTrackingOrigin, ovr_GetTrackerPose, ovr_RecenterTrackingOrigin\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SpecifyTrackingOrigin(ovrSession session, ovrPosef originPose);\r
+\r
+/// Clears the ShouldRecenter status bit in ovrSessionStatus.\r
+///\r
+/// Clears the ShouldRecenter status bit in ovrSessionStatus, allowing further recenter requests to\r
+/// be detected. Since this is automatically done by ovr_RecenterTrackingOrigin and\r
+/// ovr_SpecifyTrackingOrigin, this function only needs to be called when application is doing\r
+/// its own re-centering logic.\r
+OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session);\r
+\r
+/// Returns tracking state reading based on the specified absolute system time.\r
+///\r
+/// Pass an absTime value of 0.0 to request the most recent sensor reading. In this case\r
+/// both PredictedPose and SamplePose will have the same value.\r
+///\r
+/// This may also be used for more refined timing of front buffer rendering logic, and so on.\r
+/// This may be called by multiple threads.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] absTime Specifies the absolute future time to predict the return\r
+/// ovrTrackingState value. Use 0 to request the most recent tracking state.\r
+/// \param[in] latencyMarker Specifies that this call is the point in time where\r
+/// the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer\r
+/// provides "SensorSampleTime", that will override the value stored here.\r
+/// \return Returns the ovrTrackingState that is predicted for the given absTime.\r
+///\r
+/// \see ovrTrackingState, ovr_GetEyePoses, ovr_GetTimeInSeconds\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrTrackingState)\r
+ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker);\r
+\r
+/// Returns an array of poses, where each pose matches a device type provided by the deviceTypes\r
+/// array parameter. If any pose cannot be retrieved, it will return a reason for the missing\r
+/// pose and the device pose will be zeroed out with a pose quaternion [x=0, y=0, z=0, w=1].\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] deviceTypes Array of device types to query for their poses.\r
+/// \param[in] deviceCount Number of queried poses. This number must match the length of the\r
+/// outDevicePoses and deviceTypes array.\r
+/// \param[in] absTime Specifies the absolute future time to predict the return\r
+/// ovrTrackingState value. Use 0 to request the most recent tracking state.\r
+/// \param[out] outDevicePoses Array of poses, one for each device type in deviceTypes arrays.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and\r
+/// true upon success.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetDevicePoses(\r
+ ovrSession session,\r
+ ovrTrackedDeviceType* deviceTypes,\r
+ int deviceCount,\r
+ double absTime,\r
+ ovrPoseStatef* outDevicePoses);\r
+\r
+\r
+/// Returns the ovrTrackerPose for the given attached tracker.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] trackerPoseIndex Index of the tracker being requested.\r
+///\r
+/// \return Returns the requested ovrTrackerPose. An empty ovrTrackerPose will be returned if\r
+/// trackerPoseIndex is out of range.\r
+///\r
+/// \see ovr_GetTrackerCount\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrTrackerPose)\r
+ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex);\r
+\r
+/// Returns the most recent input state for controllers, without positional tracking info.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] controllerType Specifies which controller the input will be returned for.
+/// \param[out] inputState Input state that will be filled in.
+/// \return Returns ovrSuccess if the new state was successfully obtained.\r
+///\r
+/// \see ovrControllerType\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState);\r
+\r
+/// Returns controller types connected to the system OR'ed together.\r
+///\r
+/// \return A bitmask of ovrControllerTypes connected to the system.\r
+///\r
+/// \see ovrControllerType\r
+///\r
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session);\r
+\r
+/// Gets information about Haptics engine for the specified Touch controller.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] controllerType The controller to retrieve the information from.\r
+///\r
+/// \return Returns an ovrTouchHapticsDesc.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrTouchHapticsDesc)\r
+ovr_GetTouchHapticsDesc(ovrSession session, ovrControllerType controllerType);\r
+\r
+/// Sets constant vibration (with specified frequency and amplitude) to a controller.\r
+/// Note: ovr_SetControllerVibration cannot be used interchangeably with\r
+/// ovr_SubmitControllerVibration.\r
+///\r
+/// This method should be called periodically, vibration lasts for a maximum of 2.5 seconds.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] controllerType The controller to set the vibration to.\r
+/// \param[in] frequency Vibration frequency. Supported values are: 0.0 (disabled), 0.5 and 1.0. Non\r
+/// valid values will be clamped.\r
+/// \param[in] amplitude Vibration amplitude in the [0.0, 1.0] range.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by\r
+/// controllerType is not available.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetControllerVibration(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ float frequency,\r
+ float amplitude);\r
+\r
+/// Submits a Haptics buffer (used for vibration) to Touch (only) controllers.\r
+/// Note: ovr_SubmitControllerVibration cannot be used interchangeably with\r
+/// ovr_SetControllerVibration.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] controllerType Controller where the Haptics buffer will be played.\r
+/// \param[in] buffer Haptics buffer containing amplitude samples to be played.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by\r
+/// controllerType is not available.\r
+///\r
+/// \see ovrHapticsBuffer\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SubmitControllerVibration(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ const ovrHapticsBuffer* buffer);\r
+\r
+/// Gets the Haptics engine playback state of a specific Touch controller.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] controllerType Controller where the Haptics buffer will be played.\r
+/// \param[out] outState State of the haptics engine.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by\r
+/// controllerType is not available.\r
+///\r
+/// \see ovrHapticsPlaybackState\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetControllerVibrationState(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ ovrHapticsPlaybackState* outState);\r
+\r
+/// Tests collision/proximity of position tracked devices (e.g. HMD and/or Touch) against the\r
+/// Boundary System.\r
+/// Note: this method is similar to ovr_TestBoundaryPoint but can be more precise as it may take\r
+/// into account device acceleration/momentum.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] deviceBitmask Bitmask of one or more tracked devices to test.\r
+/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea.\r
+/// \param[out] outTestResult Result of collision/proximity test, contains information such as\r
+/// distance and closest point.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due\r
+/// to not being set up.\r
+/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by\r
+/// deviceBitmask is not available.\r
+///\r
+/// \see ovrBoundaryTestResult\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_TestBoundary(\r
+ ovrSession session,\r
+ ovrTrackedDeviceType deviceBitmask,\r
+ ovrBoundaryType boundaryType,\r
+ ovrBoundaryTestResult* outTestResult);\r
+\r
+/// Tests collision/proximity of a 3D point against the Boundary System.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] point 3D point to test.\r
+/// \param[in] singleBoundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea to test\r
+/// against\r
+/// \param[out] outTestResult Result of collision/proximity test, contains information such as\r
+/// distance and closest point.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due\r
+/// to not being set up.\r
+///\r
+/// \see ovrBoundaryTestResult\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_TestBoundaryPoint(\r
+ ovrSession session,\r
+ const ovrVector3f* point,\r
+ ovrBoundaryType singleBoundaryType,\r
+ ovrBoundaryTestResult* outTestResult);\r
+\r
+/// Sets the look and feel of the Boundary System.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] lookAndFeel Look and feel parameters.\r
+/// \return Returns ovrSuccess upon success.\r
+/// \see ovrBoundaryLookAndFeel\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetBoundaryLookAndFeel(ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel);\r
+\r
+/// Resets the look and feel of the Boundary System to its default state.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \return Returns ovrSuccess upon success.\r
+/// \see ovrBoundaryLookAndFeel\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetBoundaryLookAndFeel(ovrSession session);\r
+\r
+/// Gets the geometry of the Boundary System's "play area" or "outer boundary" as 3D floor points.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea.\r
+/// \param[out] outFloorPoints Array of 3D points (in clockwise order) defining the boundary at\r
+/// floor height (can be NULL to retrieve only the number of points).\r
+/// \param[out] outFloorPointsCount Number of 3D points returned in the array.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due\r
+/// to not being set up.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetBoundaryGeometry(\r
+ ovrSession session,\r
+ ovrBoundaryType boundaryType,\r
+ ovrVector3f* outFloorPoints,\r
+ int* outFloorPointsCount);\r
+\r
+/// Gets the dimension of the Boundary System's "play area" or "outer boundary".\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea.\r
+/// \param[out] outDimensions Dimensions of the axis aligned bounding box that encloses the area in\r
+/// meters (width, height and length).\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: The call succeeded and a result was returned.\r
+/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due\r
+/// to not being set up.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetBoundaryDimensions(\r
+ ovrSession session,\r
+ ovrBoundaryType boundaryType,\r
+ ovrVector3f* outDimensions);\r
+\r
+/// Returns if the boundary is currently visible.\r
+/// Note: visibility is false if the user has turned off boundaries, otherwise, it's true if\r
+/// the app has requested boundaries to be visible or if any tracked device is currently\r
+/// triggering it. This may not exactly match rendering due to fade-in and fade-out effects.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[out] outIsVisible ovrTrue, if the boundary is visible.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: Result was successful and a result was returned.\r
+/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due\r
+/// to not being set up.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetBoundaryVisible(ovrSession session, ovrBool* outIsVisible);\r
+\r
+/// Requests boundary to be visible.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] visible forces the outer boundary to be visible. An application can't force it\r
+/// to be invisible, but can cancel its request by passing false.\r
+/// \return Returns ovrSuccess upon success.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RequestBoundaryVisible(ovrSession session, ovrBool visible);\r
+\r
+// -----------------------------------------------------------------------------------\r
+/// @name Mixed reality capture support\r
+///\r
+/// Defines functions used for mixed reality capture / third person cameras.\r
+///\r
+\r
+/// Returns the number of camera properties of all cameras\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in,out] cameras Pointer to the array. If null and the provided array capacity is\r
+/// sufficient, will return ovrError_NullArrayPointer.\r
+/// \param[in,out] inoutCameraCount On input, the capacity of the cameras array; on output, the\r
+/// actual number of cameras defined. If *inoutCameraCount is too small,\r
+/// will return ovrError_InsufficientArraySize.\r
+/// \return Returns the list of external cameras the system knows about.\r
+/// Returns ovrError_NoExternalCameraInfo if there is not any external camera information.\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetExternalCameras(\r
+ ovrSession session,\r
+ ovrExternalCamera* cameras,\r
+ unsigned int* inoutCameraCount);\r
+\r
+/// Sets the camera intrinsics and/or extrinsics stored for the cameraName camera\r
+/// Names must be < 32 characters and null-terminated.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] name Specifies which camera to set the intrinsics or extrinsics for.\r
+/// The name must be at most OVR_EXTERNAL_CAMERA_NAME_SIZE - 1\r
+/// characters. Otherwise, ovrError_ExternalCameraNameWrongSize is returned.\r
+/// \param[in] intrinsics Contains the intrinsic parameters to set, can be null\r
+/// \param[in] extrinsics Contains the extrinsic parameters to set, can be null\r
+/// \return Returns ovrSuccess or an ovrError code\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetExternalCameraProperties(\r
+ ovrSession session,\r
+ const char* name,\r
+ const ovrCameraIntrinsics* const intrinsics,\r
+ const ovrCameraExtrinsics* const extrinsics);\r
+\r
+///@}\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+//-------------------------------------------------------------------------------------\r
+// @name Layers\r
+//\r
+///@{\r
+\r
+/// Specifies the maximum number of layers supported by ovr_SubmitFrame.\r
+///\r
+/// \see ovr_SubmitFrame\r
+///\r
+enum { ovrMaxLayerCount = 16 };\r
+\r
+/// Describes layer types that can be passed to ovr_SubmitFrame.\r
+/// Each layer type has an associated struct, such as ovrLayerEyeFov.\r
+///\r
+/// Note: the enum values are not contiguous; 4 belonged to the retired\r
+/// ovrLayerType_QuadHeadLocked, while 6 and 9 are unused.\r
+///\r
+/// \see ovrLayerHeader\r
+///\r
+typedef enum ovrLayerType_ {\r
+ /// Layer is disabled.\r
+ ovrLayerType_Disabled = 0,\r
+\r
+ /// Described by ovrLayerEyeFov.\r
+ ovrLayerType_EyeFov = 1,\r
+\r
+ /// Described by ovrLayerEyeFovDepth.\r
+ ovrLayerType_EyeFovDepth = 2,\r
+\r
+ /// Described by ovrLayerQuad. Previously called ovrLayerType_QuadInWorld.\r
+ ovrLayerType_Quad = 3,\r
+\r
+ // enum 4 used to be ovrLayerType_QuadHeadLocked. Instead, use ovrLayerType_Quad with\r
+ // ovrLayerFlag_HeadLocked.\r
+\r
+ /// Described by ovrLayerEyeMatrix.\r
+ ovrLayerType_EyeMatrix = 5,\r
+\r
+\r
+ /// Described by ovrLayerEyeFovMultires.\r
+ ovrLayerType_EyeFovMultires = 7,\r
+\r
+ /// Described by ovrLayerCylinder.\r
+ ovrLayerType_Cylinder = 8,\r
+\r
+ /// Described by ovrLayerCube.\r
+ ovrLayerType_Cube = 10,\r
+\r
+\r
+ ovrLayerType_EnumSize = 0x7fffffff ///< Force type int32_t.\r
+\r
+} ovrLayerType;\r
+\r
+/// Identifies flags used by ovrLayerHeader and which are passed to ovr_SubmitFrame.\r
+///\r
+/// \see ovrLayerHeader\r
+///\r
+typedef enum ovrLayerFlags_ {\r
+ /// ovrLayerFlag_HighQuality enables 4x anisotropic sampling during the composition of the layer.\r
+ /// The benefits are mostly visible at the periphery for high-frequency & high-contrast visuals.\r
+ /// For best results consider combining this flag with an ovrTextureSwapChain that has mipmaps and\r
+ /// instead of using arbitrary sized textures, prefer texture sizes that are powers-of-two.\r
+ /// Note: the actual rendered viewport doesn't necessarily have to fill the whole texture.\r
+ ovrLayerFlag_HighQuality = 0x01,\r
+\r
+ /// ovrLayerFlag_TextureOriginAtBottomLeft: the opposite is TopLeft.\r
+ /// Generally this is false for D3D, true for OpenGL.\r
+ ovrLayerFlag_TextureOriginAtBottomLeft = 0x02,\r
+\r
+ /// Mark this surface as "headlocked", which means it is specified\r
+ /// relative to the HMD and moves with it, rather than being specified\r
+ /// relative to sensor/torso space and remaining still while the head moves.\r
+ /// What used to be ovrLayerType_QuadHeadLocked is now ovrLayerType_Quad plus this flag.\r
+ /// However the flag can be applied to any layer type to achieve a similar effect.\r
+ ovrLayerFlag_HeadLocked = 0x04,\r
+\r
+\r
+} ovrLayerFlags;\r
+\r
+/// Defines properties shared by all ovrLayer structs, such as ovrLayerEyeFov.\r
+///\r
+/// ovrLayerHeader is used as a base member in these larger structs.\r
+/// It is always the first member of those structs, so a pointer to any layer\r
+/// struct can be treated as a pointer to its ovrLayerHeader.\r
+/// This struct cannot be used by itself except for the case that Type is ovrLayerType_Disabled.\r
+///\r
+/// \see ovrLayerType, ovrLayerFlags\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerHeader_ {\r
+ ovrLayerType Type; ///< Described by ovrLayerType.\r
+ unsigned Flags; ///< Described by ovrLayerFlags (bitwise OR of flags).\r
+} ovrLayerHeader;\r
+\r
+/// Describes a layer that specifies a monoscopic or stereoscopic view.\r
+/// This is the kind of layer that's typically used as layer 0 to ovr_SubmitFrame,\r
+/// as it is the kind of layer used to render a 3D stereoscopic view.\r
+///\r
+/// Three options exist with respect to mono/stereo texture usage:\r
+/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings,\r
+/// respectively.\r
+/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.\r
+/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,\r
+/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0].\r
+/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and\r
+/// Viewport[1] both refer to that rendering.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFov_ {\r
+ /// Header.Type must be ovrLayerType_EyeFov.\r
+ ovrLayerHeader Header;\r
+\r
+ /// ovrTextureSwapChains for the left and right eye respectively.\r
+ /// The second one of which can be NULL for cases described above.\r
+ ovrTextureSwapChain ColorTexture[ovrEye_Count];\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ /// Both Viewport[0] and Viewport[1] must be valid.\r
+ ovrRecti Viewport[ovrEye_Count];\r
+\r
+ /// The viewport field of view.\r
+ ovrFovPort Fov[ovrEye_Count];\r
+\r
+ /// Specifies the position and orientation of each eye view, with position specified in meters.\r
+ /// RenderPose will typically be the value returned from ovr_CalcEyePoses,\r
+ /// but can be different in special cases if a different head pose is used for rendering.\r
+ ovrPosef RenderPose[ovrEye_Count];\r
+\r
+ /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)\r
+ /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds\r
+ /// around the instant the application calls ovr_GetTrackingState.\r
+ /// Units: seconds, on the same clock as ovr_GetTimeInSeconds.\r
+ /// The main purpose for this is to accurately track app tracking latency.\r
+ double SensorSampleTime;\r
+\r
+} ovrLayerEyeFov;\r
+\r
+/// Describes a layer that specifies a monoscopic or stereoscopic view,\r
+/// with depth textures in addition to color textures. This is typically used to support\r
+/// positional time warp. This struct is the same as ovrLayerEyeFov, but with the addition\r
+/// of DepthTexture and ProjectionDesc.\r
+///\r
+/// ProjectionDesc can be created using ovrTimewarpProjectionDesc_FromProjection.\r
+///\r
+/// Three options exist with respect to mono/stereo texture usage:\r
+/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings,\r
+/// respectively.\r
+/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.\r
+/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,\r
+/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0].\r
+/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and\r
+/// Viewport[1] both refer to that rendering.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFovDepth_ {\r
+ /// Header.Type must be ovrLayerType_EyeFovDepth.\r
+ ovrLayerHeader Header;\r
+\r
+ /// ovrTextureSwapChains for the left and right eye respectively.\r
+ /// The second one of which can be NULL for cases described above.\r
+ ovrTextureSwapChain ColorTexture[ovrEye_Count];\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ /// Both Viewport[0] and Viewport[1] must be valid.\r
+ ovrRecti Viewport[ovrEye_Count];\r
+\r
+ /// The viewport field of view.\r
+ ovrFovPort Fov[ovrEye_Count];\r
+\r
+ /// Specifies the position and orientation of each eye view, with position specified in meters.\r
+ /// RenderPose will typically be the value returned from ovr_CalcEyePoses,\r
+ /// but can be different in special cases if a different head pose is used for rendering.\r
+ ovrPosef RenderPose[ovrEye_Count];\r
+\r
+ /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)\r
+ /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds\r
+ /// around the instant the application calls ovr_GetTrackingState.\r
+ /// Units: seconds, on the same clock as ovr_GetTimeInSeconds.\r
+ /// The main purpose for this is to accurately track app tracking latency.\r
+ double SensorSampleTime;\r
+\r
+ /// Depth texture for positional timewarp.\r
+ /// Must map 1:1 to the ColorTexture.\r
+ ovrTextureSwapChain DepthTexture[ovrEye_Count];\r
+\r
+ /// Specifies how to convert DepthTexture information into meters.\r
+ /// \see ovrTimewarpProjectionDesc_FromProjection\r
+ ovrTimewarpProjectionDesc ProjectionDesc;\r
+\r
+} ovrLayerEyeFovDepth;\r
+\r
+/// Describes eye texture layouts. Used with ovrLayerEyeFovMultires.\r
+///\r
+/// \see ovrTextureLayoutDesc_Union\r
+///\r
+typedef enum ovrTextureLayout_ {\r
+ ovrTextureLayout_Rectilinear = 0, ///< Regular eyeFov layer.\r
+ ovrTextureLayout_Octilinear = 1, ///< Octilinear extension must be enabled.\r
+ ovrTextureLayout_EnumSize = 0x7fffffff ///< Force type int32_t.\r
+} ovrTextureLayout;\r
+\r
+/// Multiresolution descriptor for Octilinear.\r
+///\r
+/// Usage of this layer must be successfully enabled via ovr_EnableExtension\r
+/// before it can be used.\r
+///\r
+/// \see ovrLayerEyeFovMultires\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTextureLayoutOctilinear_ {\r
+ // W warping factors for the four quadrant edges.\r
+ float WarpLeft;\r
+ float WarpRight;\r
+ float WarpUp;\r
+ float WarpDown;\r
+\r
+ // Size of W quadrants.\r
+ //\r
+ // SizeLeft + SizeRight <= Viewport.Size.w\r
+ // SizeUp + SizeDown <= Viewport.Size.h\r
+ //\r
+ // Clip space (0,0) is located at Viewport.Pos + (SizeLeft,SizeUp) where\r
+ // Viewport is given in the layer description.\r
+ //\r
+ // Viewport Top left\r
+ // +-----------------------------------------------------+\r
+ // | ^ | |\r
+ // | | | |\r
+ // | 0 SizeUp 1 | |\r
+ // | | |<--Portion of viewport\r
+ // | | | determined by sizes\r
+ // | | | |\r
+ // |<--------SizeLeft-------+-------SizeRight------>| |\r
+ // | | | |\r
+ // | | | |\r
+ // | 2 SizeDown 3 | |\r
+ // | | | |\r
+ // | | | |\r
+ // | v | |\r
+ // +------------------------------------------------+ |\r
+ // | |\r
+ // +-----------------------------------------------------+\r
+ // Viewport bottom right\r
+ //\r
+ // For example, when rendering quadrant 0 its scissor rectangle will be\r
+ //\r
+ // Top = 0\r
+ // Left = 0\r
+ // Right = SizeLeft\r
+ // Bottom = SizeUp\r
+ //\r
+ // and the scissor rectangle for quadrant 1 will be:\r
+ //\r
+ // Top = 0\r
+ // Left = SizeLeft\r
+ // Right = SizeLeft + SizeRight\r
+ // Bottom = SizeUp\r
+ //\r
+ float SizeLeft;\r
+ float SizeRight;\r
+ float SizeUp;\r
+ float SizeDown;\r
+\r
+} ovrTextureLayoutOctilinear;\r
+\r
+/// Combines texture layout descriptors.\r
+///\r
+/// The active member is selected by the TextureLayout field of\r
+/// ovrLayerEyeFovMultires.\r
+///\r
+typedef union OVR_ALIGNAS(OVR_PTR_SIZE) ovrTextureLayoutDesc_Union_ {\r
+ ovrTextureLayoutOctilinear Octilinear[ovrEye_Count];\r
+} ovrTextureLayoutDesc_Union;\r
+\r
+/// Describes a layer that specifies a monoscopic or stereoscopic view with\r
+/// support for optional multiresolution textures. This struct is the same as\r
+/// ovrLayerEyeFov plus texture layout parameters.\r
+///\r
+/// Three options exist with respect to mono/stereo texture usage:\r
+/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings,\r
+/// respectively.\r
+/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.\r
+/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,\r
+/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0].\r
+/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and\r
+/// Viewport[1] both refer to that rendering.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFovMultires_ {\r
+ /// Header.Type must be ovrLayerType_EyeFovMultires.\r
+ ovrLayerHeader Header;\r
+\r
+ /// ovrTextureSwapChains for the left and right eye respectively.\r
+ /// The second one of which can be NULL for cases described above.\r
+ ovrTextureSwapChain ColorTexture[ovrEye_Count];\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ /// Both Viewport[0] and Viewport[1] must be valid.\r
+ ovrRecti Viewport[ovrEye_Count];\r
+\r
+ /// The viewport field of view.\r
+ ovrFovPort Fov[ovrEye_Count];\r
+\r
+ /// Specifies the position and orientation of each eye view, with position specified in meters.\r
+ /// RenderPose will typically be the value returned from ovr_CalcEyePoses,\r
+ /// but can be different in special cases if a different head pose is used for rendering.\r
+ ovrPosef RenderPose[ovrEye_Count];\r
+\r
+ /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)\r
+ /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds\r
+ /// around the instant the application calls ovr_GetTrackingState.\r
+ /// Units: seconds, on the same clock as ovr_GetTimeInSeconds.\r
+ /// The main purpose for this is to accurately track app tracking latency.\r
+ double SensorSampleTime;\r
+\r
+ /// Specifies layout type of textures.\r
+ ovrTextureLayout TextureLayout;\r
+\r
+ /// Specifies texture layout parameters; interpreted according to TextureLayout.\r
+ ovrTextureLayoutDesc_Union TextureLayoutDesc;\r
+\r
+} ovrLayerEyeFovMultires;\r
+\r
+/// Describes a layer that specifies a monoscopic or stereoscopic view.\r
+/// This uses a direct 3x4 matrix to map from view space to the UV coordinates.\r
+/// It is essentially the same thing as ovrLayerEyeFov but using a much\r
+/// lower level. This is mainly to provide compatibility with specific apps.\r
+/// Unless the application really requires this flexibility, it is usually better\r
+/// to use ovrLayerEyeFov.\r
+///\r
+/// Three options exist with respect to mono/stereo texture usage:\r
+/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings,\r
+/// respectively.\r
+/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.\r
+/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,\r
+/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0].\r
+/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and\r
+/// Viewport[1] both refer to that rendering.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeMatrix_ {\r
+ /// Header.Type must be ovrLayerType_EyeMatrix.\r
+ ovrLayerHeader Header;\r
+\r
+ /// ovrTextureSwapChains for the left and right eye respectively.\r
+ /// The second one of which can be NULL for cases described above.\r
+ ovrTextureSwapChain ColorTexture[ovrEye_Count];\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ /// Both Viewport[0] and Viewport[1] must be valid.\r
+ ovrRecti Viewport[ovrEye_Count];\r
+\r
+ /// Specifies the position and orientation of each eye view, with position specified in meters.\r
+ /// RenderPose will typically be the value returned from ovr_CalcEyePoses,\r
+ /// but can be different in special cases if a different head pose is used for rendering.\r
+ ovrPosef RenderPose[ovrEye_Count];\r
+\r
+ /// Specifies the mapping from a view-space vector\r
+ /// to a UV coordinate on the textures given above.\r
+ /// P = (x,y,z,1)*Matrix\r
+ /// TexU = P.x/P.z\r
+ /// TexV = P.y/P.z\r
+ ovrMatrix4f Matrix[ovrEye_Count];\r
+\r
+ /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)\r
+ /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds\r
+ /// around the instant the application calls ovr_GetTrackingState.\r
+ /// Units: seconds, on the same clock as ovr_GetTimeInSeconds.\r
+ /// The main purpose for this is to accurately track app tracking latency.\r
+ double SensorSampleTime;\r
+\r
+} ovrLayerEyeMatrix;\r
+\r
+/// Describes a layer of Quad type, which is a single quad in world or viewer space.\r
+/// It is used for ovrLayerType_Quad. This type of layer represents a single\r
+/// object placed in the world and not a stereo view of the world itself.\r
+///\r
+/// A typical use of ovrLayerType_Quad is to draw a television screen in a room\r
+/// that for some reason is more convenient to draw as a layer than as part of the main\r
+/// view in layer 0. For example, it could implement a 3D popup GUI that is drawn at a\r
+/// higher resolution than layer 0 to improve fidelity of the GUI.\r
+///\r
+/// Quad layers are visible from both sides; they are not back-face culled.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame, ovrLayerFlag_HeadLocked\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerQuad_ {\r
+ /// Header.Type must be ovrLayerType_Quad.\r
+ ovrLayerHeader Header;\r
+\r
+ /// Contains a single image, never with any stereo view.\r
+ ovrTextureSwapChain ColorTexture;\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ ovrRecti Viewport;\r
+\r
+ /// Specifies the orientation and position of the center point of a Quad layer type.\r
+ /// The supplied direction is the vector perpendicular to the quad.\r
+ /// The position is in real-world meters (not the application's virtual world,\r
+ /// the physical world the user is in) and is relative to the "zero" position\r
+ /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used.\r
+ ovrPosef QuadPoseCenter;\r
+\r
+ /// Width and height (respectively) of the quad in meters.\r
+ ovrVector2f QuadSize;\r
+\r
+} ovrLayerQuad;\r
+\r
+/// Describes a layer of type ovrLayerType_Cylinder which is a single cylinder\r
+/// relative to the recentered origin. This type of layer represents a single\r
+/// object placed in the world and not a stereo view of the world itself.\r
+///\r
+/// -Z +Y\r
+/// U=0 +--+--+ U=1\r
+/// +---+ | +---+ +-----------------+ - V=0\r
+/// +--+ \ | / +--+ | | |\r
+/// +-+ \ / +-+ | | |\r
+/// ++ \ A / ++ | | |\r
+/// ++ \---/ ++ | | |\r
+/// | \ / | | +X | |\r
+/// +-------------C------R------+ +X +--------C--------+ | <--- Height\r
+/// (+Y is out of screen) | | |\r
+/// | | |\r
+/// R = Radius | | |\r
+/// A = Angle (0,2*Pi) | | |\r
+/// C = CylinderPoseCenter | | |\r
+/// U/V = UV Coordinates +-----------------+ - V=1\r
+///\r
+/// An identity CylinderPoseCenter places the center of the cylinder\r
+/// at the recentered origin unless the headlocked flag is set.\r
+///\r
+/// Does not utilize HmdSpaceToWorldScaleInMeters. If necessary, adjust\r
+/// translation and radius.\r
+///\r
+/// \note Only the interior surface of the cylinder is visible. Use cylinder\r
+/// layers when the user cannot leave the extents of the cylinder. Artifacts may\r
+/// appear when viewing the cylinder's exterior surface. Additionally, while the\r
+/// interface supports an Angle that ranges from [0,2*Pi] the angle should\r
+/// remain less than 1.9*Pi to avoid artifacts where the cylinder edges\r
+/// converge.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerCylinder_ {\r
+ /// Header.Type must be ovrLayerType_Cylinder.\r
+ ovrLayerHeader Header;\r
+\r
+ /// Contains a single image, never with any stereo view.\r
+ ovrTextureSwapChain ColorTexture;\r
+\r
+ /// Specifies the ColorTexture sub-rect UV coordinates.\r
+ ovrRecti Viewport;\r
+\r
+ /// Specifies the orientation and position of the center point of a cylinder layer type.\r
+ /// The position is in real-world meters not the application's virtual world,\r
+ /// but the physical world the user is in. It is relative to the "zero" position\r
+ /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used.\r
+ ovrPosef CylinderPoseCenter;\r
+\r
+ /// Radius of the cylinder in meters.\r
+ float CylinderRadius;\r
+\r
+ /// Angle in radians. Range is from 0 to 2*Pi exclusive covering the entire\r
+ /// cylinder (see diagram and note above).\r
+ float CylinderAngle;\r
+\r
+ /// Custom aspect ratio presumably set based on 'Viewport'. Used to\r
+ /// calculate the height of the cylinder based on the arc-length (CylinderAngle)\r
+ /// and radius (CylinderRadius) given above. The height of the cylinder is\r
+ /// given by: height = (CylinderRadius * CylinderAngle) / CylinderAspectRatio.\r
+ /// Aspect ratio is width / height.\r
+ float CylinderAspectRatio;\r
+\r
+} ovrLayerCylinder;\r
+\r
+/// Describes a layer of type ovrLayerType_Cube which is a single timewarped\r
+/// cubemap at infinity. When looking down the recentered origin's -Z axis, +X\r
+/// face is left and +Y face is up. Similarly, if headlocked the +X face is\r
+/// left, +Y face is up and -Z face is forward. Note that the coordinate system\r
+/// is left-handed.\r
+///\r
+/// ovrLayerFlag_TextureOriginAtBottomLeft flag is not supported by ovrLayerCube.\r
+///\r
+/// \see ovrTextureSwapChain, ovr_SubmitFrame\r
+///\r
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerCube_ {\r
+ /// Header.Type must be ovrLayerType_Cube.\r
+ ovrLayerHeader Header;\r
+\r
+ /// Orientation of the cube.\r
+ ovrQuatf Orientation;\r
+\r
+ /// Contains a single cubemap swapchain (not a stereo pair of swapchains);\r
+ /// the same cubemap is used for both eyes.\r
+ ovrTextureSwapChain CubeMapTexture;\r
+} ovrLayerCube;\r
+\r
+\r
+\r
+/// Union that combines ovrLayer types in a way that allows them\r
+/// to be used in a polymorphic way.\r
+/// Dispatch on Header.Type to determine which member is active.\r
+/// NOTE(review): ovrLayerEyeMatrix is not included in this union — confirm intentional.\r
+typedef union ovrLayer_Union_ {\r
+ ovrLayerHeader Header;\r
+ ovrLayerEyeFov EyeFov;\r
+ ovrLayerEyeFovDepth EyeFovDepth;\r
+ ovrLayerQuad Quad;\r
+ ovrLayerEyeFovMultires Multires;\r
+ ovrLayerCylinder Cylinder;\r
+ ovrLayerCube Cube;\r
+} ovrLayer_Union;\r
+\r
+//@}\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// @name SDK Distortion Rendering\r
+///\r
+/// All of rendering functions including the configure and frame functions\r
+/// are not thread safe. It is OK to use ConfigureRendering on one thread and handle\r
+/// frames on another thread, but explicit synchronization must be done since\r
+/// functions that depend on configured state are not reentrant.\r
+///\r
+/// These functions support rendering of distortion by the SDK.\r
+///\r
+//@{\r
+\r
+/// TextureSwapChain creation is rendering API-specific.\r
+/// ovr_CreateTextureSwapChainDX and ovr_CreateTextureSwapChainGL can be found in the\r
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h\r
+\r
+/// Gets the number of buffers in an ovrTextureSwapChain.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies the ovrTextureSwapChain for which the length should be retrieved.\r
+/// \param[out] out_Length Returns the number of buffers in the specified chain.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error.\r
+///\r
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* out_Length);\r
+\r
+/// Gets the current index in an ovrTextureSwapChain.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies the ovrTextureSwapChain for which the index should be retrieved.\r
+/// \param[out] out_Index Returns the current (free) index in specified chain.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error.\r
+///\r
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainCurrentIndex(ovrSession session, ovrTextureSwapChain chain, int* out_Index);\r
+\r
+/// Gets the description of the buffers in an ovrTextureSwapChain\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies the ovrTextureSwapChain for which the description\r
+/// should be retrieved.\r
+/// \param[out] out_Desc Returns the description of the specified chain.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error.\r
+///\r
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainDesc(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ ovrTextureSwapChainDesc* out_Desc);\r
+\r
+/// Commits any pending changes to an ovrTextureSwapChain, and advances its current index\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies the ovrTextureSwapChain to commit.\r
+///\r
+/// \note When Commit is called, the texture at the current index is considered ready for use by the\r
+/// runtime, and further writes to it should be avoided. The swap chain's current index is advanced,\r
+/// providing there's room in the chain. The next time the SDK dereferences this texture swap chain,\r
+/// it will synchronize with the app's graphics context and pick up the submitted index, opening up\r
+/// room in the swap chain for further commits.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error.\r
+/// Failures include but aren't limited to:\r
+/// - ovrError_TextureSwapChainFull: ovr_CommitTextureSwapChain was called too many times on a\r
+/// texture swapchain without calling submit to use the chain.\r
+///\r
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);\r
+\r
+/// Destroys an ovrTextureSwapChain and frees all the resources associated with it.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies the ovrTextureSwapChain to destroy. If it is NULL then\r
+/// this function has no effect.\r
+///\r
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL\r
+///\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);\r
+\r
+/// MirrorTexture creation is rendering API-specific.\r
+/// ovr_CreateMirrorTextureWithOptionsDX and ovr_CreateMirrorTextureWithOptionsGL can be found in\r
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h\r
+\r
+/// Destroys a mirror texture previously created by one of the mirror texture creation functions.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] mirrorTexture Specifies the ovrTexture to destroy. If it is NULL then\r
+/// this function has no effect.\r
+///\r
+/// \see ovr_CreateMirrorTextureWithOptionsDX, ovr_CreateMirrorTextureWithOptionsGL\r
+///\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture);\r
+\r
+/// Calculates the recommended viewport size for rendering a given eye within the HMD\r
+/// with a given FOV cone.\r
+///\r
+/// Higher FOV will generally require larger textures to maintain quality.\r
+/// Apps packing multiple eye views together on the same texture should ensure there are\r
+/// at least 8 pixels of padding between them to prevent texture filtering and chromatic\r
+/// aberration causing images to leak between the two eye views.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] eye Specifies which eye (left or right) to calculate for.\r
+/// \param[in] fov Specifies the ovrFovPort to use.\r
+/// \param[in] pixelsPerDisplayPixel Specifies the ratio of the number of render target pixels\r
+/// to display pixels at the center of distortion. 1.0 is the default value. Lower\r
+/// values can improve performance, higher values give improved quality.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);\r
+/// ovrSizei eyeSizeLeft = ovr_GetFovTextureSize(session, ovrEye_Left,\r
+/// hmdDesc.DefaultEyeFov[ovrEye_Left], 1.0f);\r
+/// ovrSizei eyeSizeRight = ovr_GetFovTextureSize(session, ovrEye_Right,\r
+/// hmdDesc.DefaultEyeFov[ovrEye_Right], 1.0f);\r
+/// \endcode\r
+///\r
+/// \return Returns the texture width and height size.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrSizei)\r
+ovr_GetFovTextureSize(\r
+ ovrSession session,\r
+ ovrEyeType eye,\r
+ ovrFovPort fov,\r
+ float pixelsPerDisplayPixel);\r
+\r
+/// Computes the distortion viewport, view adjust, and other rendering parameters for\r
+/// the specified eye.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] eyeType Specifies which eye (left or right) for which to perform calculations.\r
+/// \param[in] fov Specifies the ovrFovPort to use.\r
+///\r
+/// \return Returns the computed ovrEyeRenderDesc for the given eyeType and field of view.\r
+///\r
+/// \see ovrEyeRenderDesc\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc)\r
+ovr_GetRenderDesc(ovrSession session, ovrEyeType eyeType, ovrFovPort fov);\r
+\r
+/// Waits until surfaces are available and it is time to begin rendering the frame. Must be\r
+/// called before ovr_BeginFrame, but not necessarily from the same thread.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \param[in] frameIndex Specifies the targeted application frame index.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: command completed successfully.\r
+/// - ovrSuccess_NotVisible: rendering of a previous frame completed successfully but was not\r
+/// displayed on the HMD, usually because another application currently has ownership of the\r
+/// HMD. Applications receiving this result should stop rendering new content and call\r
+/// ovr_GetSessionStatus to detect visibility.\r
+/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)\r
+/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session\r
+/// needs to be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be
+/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics\r
+/// resources do not need to be recreated unless the new ovr_Create call returns a different\r
+/// GraphicsLuid.\r
+///\r
+/// \see ovr_BeginFrame, ovr_EndFrame, ovr_GetSessionStatus\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_WaitToBeginFrame(ovrSession session, long long frameIndex);\r
+\r
+/// Called from render thread before application begins rendering. Must be called after\r
+/// ovr_WaitToBeginFrame and before ovr_EndFrame, but not necessarily from the same threads.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \param[in] frameIndex Specifies the targeted application frame index. It must match what was\r
+/// passed to ovr_WaitToBeginFrame.\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: command completed successfully.\r
+/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)\r
+/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session\r
+/// needs to be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be
+/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics\r
+/// resources do not need to be recreated unless the new ovr_Create call returns a different\r
+/// GraphicsLuid.\r
+///\r
+/// \see ovr_WaitToBeginFrame, ovr_EndFrame\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_BeginFrame(ovrSession session, long long frameIndex);\r
+\r
+/// Called from render thread after application has finished rendering. Must be called after\r
+/// ovr_BeginFrame, but not necessarily from the same thread. Submits layers for distortion and\r
+/// display, which will happen asynchronously.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \param[in] frameIndex Specifies the targeted application frame index. It must match what was\r
+/// passed to ovr_BeginFrame.\r
+///\r
+/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains\r
+/// an ovrLayerType_Quad. If NULL, a default version is used based on the current\r
+/// configuration and a 1.0 world scale.\r
+///\r
+/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to\r
+/// indicate that any previously shown layer at that index is to not be displayed.\r
+/// Each layer header must be a part of a layer structure such as ovrLayerEyeFov or\r
+/// ovrLayerQuad, with Header.Type identifying its type. A NULL layerPtrList entry in the\r
+/// array indicates the absence of the given layer.\r
+///\r
+/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum\r
+/// supported layerCount is not currently specified, but may be specified in a future\r
+/// version.\r
+///\r
+/// - Layers are drawn in the order they are specified in the array, regardless of the layer type.\r
+///\r
+/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be\r
+/// specified in every call to ovr_SubmitFrame or it won't be displayed.\r
+///\r
+/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is\r
+/// passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed.\r
+///\r
+/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type\r
+/// are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier\r
+/// entry).\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrLayerEyeFov layer0;\r
+/// ovrLayerQuad layer1;\r
+/// ...\r
+/// ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header };\r
+/// ovrResult result = ovr_EndFrame(session, frameIndex, nullptr, layers, 2);\r
+/// \endcode\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: rendering completed successfully.\r
+/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)\r
+/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session\r
+/// needs to be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be
+/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics\r
+/// resources do not need to be recreated unless the new ovr_Create call returns a different\r
+/// GraphicsLuid.\r
+/// - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or\r
+/// inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first.\r
+///\r
+/// \see ovr_WaitToBeginFrame, ovr_BeginFrame, ovrViewScaleDesc, ovrLayerHeader\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_EndFrame(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ const ovrViewScaleDesc* viewScaleDesc,\r
+ ovrLayerHeader const* const* layerPtrList,\r
+ unsigned int layerCount);\r
+\r
+/// Submits layers for distortion and display.\r
+///\r
+/// Deprecated. Use ovr_WaitToBeginFrame, ovr_BeginFrame, and ovr_EndFrame instead.\r
+///\r
+/// ovr_SubmitFrame triggers distortion and processing which might happen asynchronously.\r
+/// The function will return when there is room in the submission queue and surfaces\r
+/// are available. Distortion might or might not have completed.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+///\r
+/// \param[in] frameIndex Specifies the targeted application frame index, or 0 to refer to one frame\r
+/// after the last time ovr_SubmitFrame was called.\r
+///\r
+/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains\r
+/// an ovrLayerType_Quad. If NULL, a default version is used based on the current\r
+/// configuration and a 1.0 world scale.\r
+///\r
+/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to\r
+/// indicate that any previously shown layer at that index is to not be displayed.\r
+/// Each layer header must be a part of a layer structure such as ovrLayerEyeFov or\r
+/// ovrLayerQuad, with Header.Type identifying its type. A NULL layerPtrList entry in the\r
+/// array indicates the absence of the given layer.\r
+///\r
+/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum\r
+/// supported layerCount is not currently specified, but may be specified in a future\r
+/// version.\r
+///\r
+/// - Layers are drawn in the order they are specified in the array, regardless of the layer type.\r
+///\r
+/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be\r
+/// specified in every call to ovr_SubmitFrame or it won't be displayed.\r
+///\r
+/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is\r
+/// passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed.\r
+///\r
+/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type\r
+/// are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier\r
+/// entry).\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrLayerEyeFov layer0;\r
+/// ovrLayerQuad layer1;\r
+/// ...\r
+/// ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header };\r
+/// ovrResult result = ovr_SubmitFrame(session, frameIndex, nullptr, layers, 2);\r
+/// \endcode\r
+///\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success. Return values include but aren't limited to:\r
+/// - ovrSuccess: rendering completed successfully.\r
+/// - ovrSuccess_NotVisible: rendering completed successfully but was not displayed on the HMD,\r
+/// usually because another application currently has ownership of the HMD. Applications\r
+/// receiving this result should stop rendering new content and call ovr_GetSessionStatus
+/// to detect visibility.\r
+/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)\r
+/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session\r
+/// needs to be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be
+/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics\r
+/// resources do not need to be recreated unless the new ovr_Create call returns a different\r
+/// GraphicsLuid.\r
+/// - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or\r
+/// inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first.\r
+///\r
+/// \see ovr_GetPredictedDisplayTime, ovrViewScaleDesc, ovrLayerHeader, ovr_GetSessionStatus\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SubmitFrame(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ const ovrViewScaleDesc* viewScaleDesc,\r
+ ovrLayerHeader const* const* layerPtrList,\r
+ unsigned int layerCount);\r
+///@}\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+//-------------------------------------------------------------------------------------\r
+/// @name Frame Timing\r
+///\r
+//@{\r
+\r
+///\r
+/// Contains the performance stats for a given SDK compositor frame\r
+///\r
+/// All of the 'int' typed fields can be reset via the ovr_ResetPerfStats call.\r
+///\r
+typedef struct OVR_ALIGNAS(4) ovrPerfStatsPerCompositorFrame_ {\r
+ /// Vsync Frame Index - increments with each HMD vertical synchronization signal (i.e. vsync or\r
+ /// refresh rate)\r
+ /// If the compositor drops a frame, expect this value to increment more than 1 at a time.\r
+ int HmdVsyncIndex;\r
+\r
+ ///\r
+ /// Application stats\r
+ ///\r
+\r
+ /// Index that increments with each successive ovr_SubmitFrame call\r
+ int AppFrameIndex;\r
+\r
+ /// If the app fails to call ovr_SubmitFrame on time, then expect this value to increment with\r
+ /// each missed frame\r
+ int AppDroppedFrameCount;\r
+\r
+ /// Motion-to-photon latency for the application\r
+ /// This value is calculated by either using the SensorSampleTime provided for the ovrLayerEyeFov\r
+ /// or if that\r
+ /// is not available, then the call to ovr_GetTrackingState which has latencyMarker set to ovrTrue\r
+ float AppMotionToPhotonLatency;\r
+\r
+ /// Amount of queue-ahead in seconds provided to the app based on performance and overlap of\r
+ /// CPU and GPU utilization. A value of 0.0 would mean the CPU & GPU workload is being completed\r
+ /// in 1 frame's worth of time, while 11 ms (on the CV1) of queue ahead would indicate that the\r
+ /// app's CPU workload for the next frame is overlapping the GPU workload for the current frame.\r
+ float AppQueueAheadTime;\r
+\r
+ /// Amount of time in seconds spent on the CPU by the app's render-thread that calls\r
+/// ovr_SubmitFrame. Measured as elapsed time from when the app regains control from
+ /// ovr_SubmitFrame to the next time the app calls ovr_SubmitFrame.\r
+ float AppCpuElapsedTime;\r
+\r
+ /// Amount of time in seconds spent on the GPU by the app.\r
+ /// Measured as elapsed time between each ovr_SubmitFrame call using GPU timing queries.\r
+ float AppGpuElapsedTime;\r
+\r
+ ///\r
+ /// SDK Compositor stats\r
+ ///\r
+\r
+ /// Index that increments each time the SDK compositor completes a distortion and timewarp pass\r
+ /// Since the compositor operates asynchronously, even if the app calls ovr_SubmitFrame too late,\r
+ /// the compositor will kick off for each vsync.\r
+ int CompositorFrameIndex;\r
+\r
+ /// Increments each time the SDK compositor fails to complete in time\r
+ /// This is not tied to the app's performance, but failure to complete can be related to other\r
+ /// factors such as OS capabilities, overall available hardware cycles to execute the compositor\r
+ /// in time and other factors outside of the app's control.\r
+ int CompositorDroppedFrameCount;\r
+\r
+ /// Motion-to-photon latency of the SDK compositor in seconds.\r
+ /// This is the latency of timewarp which corrects the higher app latency as well as dropped app\r
+ /// frames.\r
+ float CompositorLatency;\r
+\r
+ /// The amount of time in seconds spent on the CPU by the SDK compositor. Unless the\r
+ /// VR app is utilizing all of the CPU cores at their peak performance, there is a good chance the\r
+ /// compositor CPU times will not affect the app's CPU performance in a major way.\r
+ float CompositorCpuElapsedTime;\r
+\r
+ /// The amount of time in seconds spent on the GPU by the SDK compositor. Any time spent on the\r
+ /// compositor will eat away from the available GPU time for the app.\r
+ float CompositorGpuElapsedTime;\r
+\r
+ /// The amount of time in seconds spent from the point the CPU kicks off the compositor to the\r
+ /// point in time the compositor completes the distortion & timewarp on the GPU. In the event the\r
+ /// GPU time is not available, expect this value to be -1.0f.\r
+ float CompositorCpuStartToGpuEndElapsedTime;\r
+\r
+ /// The amount of time in seconds left after the compositor is done on the GPU to the associated\r
+ /// V-Sync time. In the event the GPU time is not available, expect this value to be -1.0f.\r
+ float CompositorGpuEndToVsyncElapsedTime;\r
+\r
+ ///\r
+ /// Async Spacewarp stats (ASW)\r
+ ///\r
+\r
+ /// Will be true if ASW is active for the given frame such that the application is being forced\r
+ /// into half the frame-rate while the compositor continues to run at full frame-rate.\r
+ ovrBool AswIsActive;\r
+\r
+/// Increments each time ASW is activated where the app was forced in and out of
+ /// half-rate rendering.\r
+ int AswActivatedToggleCount;\r
+\r
+ /// Accumulates the number of frames presented by the compositor which had extrapolated\r
+ /// ASW frames presented.\r
+ int AswPresentedFrameCount;\r
+\r
+ /// Accumulates the number of frames that the compositor tried to present when ASW is\r
+ /// active but failed.\r
+ int AswFailedFrameCount;\r
+\r
+} ovrPerfStatsPerCompositorFrame;\r
+\r
+///\r
+/// Maximum number of frames of performance stats provided back to the caller of ovr_GetPerfStats\r
+///\r
+enum { ovrMaxProvidedFrameStats = 5 };\r
+\r
+///\r
+/// This is a complete descriptor of the performance stats provided by the SDK\r
+///\r
+/// \see ovr_GetPerfStats, ovrPerfStatsPerCompositorFrame\r
+typedef struct OVR_ALIGNAS(4) ovrPerfStats_ {\r
+ /// FrameStatsCount will have a maximum value set by ovrMaxProvidedFrameStats\r
+ /// If the application calls ovr_GetPerfStats at the native refresh rate of the HMD\r
+ /// then FrameStatsCount will be 1. If the app's workload happens to force\r
+ /// ovr_GetPerfStats to be called at a lower rate, then FrameStatsCount will be 2 or more.\r
+ /// If the app does not want to miss any performance data for any frame, it needs to\r
+ /// ensure that it is calling ovr_SubmitFrame and ovr_GetPerfStats at a rate that is at least:\r
+ /// "HMD_refresh_rate / ovrMaxProvidedFrameStats". On the Oculus Rift CV1 HMD, this will\r
+ /// be equal to 18 times per second.\r
+ ///\r
+ /// The performance entries will be ordered in reverse chronological order such that the\r
+ /// first entry will be the most recent one.\r
+ ovrPerfStatsPerCompositorFrame FrameStats[ovrMaxProvidedFrameStats];\r
+ int FrameStatsCount;\r
+\r
+ /// If the app calls ovr_GetPerfStats at less than 18 fps for CV1, then AnyFrameStatsDropped\r
+ /// will be ovrTrue and FrameStatsCount will be equal to ovrMaxProvidedFrameStats.\r
+ ovrBool AnyFrameStatsDropped;\r
+\r
+ /// AdaptiveGpuPerformanceScale is an edge-filtered value that a caller can use to adjust\r
+ /// the graphics quality of the application to keep the GPU utilization in check. The value\r
+ /// is calculated as: (desired_GPU_utilization / current_GPU_utilization)\r
+ /// As such, when this value is 1.0, the GPU is doing the right amount of work for the app.\r
+ /// Lower values mean the app needs to pull back on the GPU utilization.\r
+ /// If the app is going to directly drive render-target resolution using this value, then\r
+ /// be sure to take the square-root of the value before scaling the resolution with it.\r
+/// Changing render target resolutions however is one of the many things an app can do to
+ /// increase or decrease the amount of GPU utilization.\r
+ /// Since AdaptiveGpuPerformanceScale is edge-filtered and does not change rapidly\r
+ /// (i.e. reports non-1.0 values once every couple of seconds) the app can make the\r
+ /// necessary adjustments and then keep watching the value to see if it has been satisfied.\r
+ float AdaptiveGpuPerformanceScale;\r
+\r
+ /// Will be true if Async Spacewarp (ASW) is available for this system which is dependent on\r
+ /// several factors such as choice of GPU, OS and debug overrides\r
+ ovrBool AswIsAvailable;\r
+\r
+ /// Contains the Process ID of the VR application the stats are being polled for\r
+ /// If an app continues to grab perf stats even when it is not visible, then expect this\r
+ /// value to point to the other VR app that has grabbed focus (i.e. became visible)\r
+ ovrProcessId VisibleProcessId;\r
+} ovrPerfStats;\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Retrieves performance stats for the VR app as well as the SDK compositor.\r
+///\r
+/// This function will return stats for the VR app that is currently visible in the HMD\r
+/// regardless of what VR app is actually calling this function.\r
+///\r
+/// If the VR app is trying to make sure the stats returned belong to the same application,\r
+/// the caller can compare the VisibleProcessId with their own process ID. Normally this will\r
+/// be the case if the caller is only calling ovr_GetPerfStats when ovr_GetSessionStatus has\r
+/// IsVisible flag set to be true.\r
+///\r
+/// If the VR app calling ovr_GetPerfStats is actually the one visible in the HMD,\r
+/// then new perf stats will only be populated after a new call to ovr_SubmitFrame.\r
+/// That means subsequent calls to ovr_GetPerfStats after the first one without calling\r
+/// ovr_SubmitFrame will receive a FrameStatsCount of zero.\r
+///\r
+/// If the VR app is not visible, or was initially marked as ovrInit_Invisible, then each call\r
+/// to ovr_GetPerfStats will immediately fetch new perf stats from the compositor without\r
+/// a need for the ovr_SubmitFrame call.\r
+///\r
+/// Even though invisible VR apps do not require ovr_SubmitFrame to be called to gather new\r
+/// perf stats, since stats are generated at the native refresh rate of the HMD (i.e. 90 Hz\r
+/// for CV1), calling it at a higher rate than that would be unnecessary.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[out] outStats Contains the performance stats for the application and SDK compositor\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success.\r
+///\r
+/// \see ovrPerfStats, ovrPerfStatsPerCompositorFrame, ovr_ResetPerfStats\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetPerfStats(ovrSession session, ovrPerfStats* outStats);\r
+\r
+/// Resets the accumulated stats reported in each ovrPerfStatsPerCompositorFrame back to zero.\r
+///\r
+/// Only the integer values such as HmdVsyncIndex, AppDroppedFrameCount etc. will be reset\r
+/// as the other fields such as AppMotionToPhotonLatency are independent timing values updated\r
+/// per-frame.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true\r
+/// upon success.\r
+///\r
+/// \see ovrPerfStats, ovrPerfStatsPerCompositorFrame, ovr_GetPerfStats\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetPerfStats(ovrSession session);\r
+\r
+/// Gets the time of the specified frame midpoint.\r
+///\r
+/// Predicts the time at which the given frame will be displayed. The predicted time\r
+/// is the middle of the time period during which the corresponding eye images will\r
+/// be displayed.\r
+///\r
+/// The application should increment frameIndex for each successively targeted frame,\r
+/// and pass that index to any relevant OVR functions that need to apply to the frame\r
+/// identified by that index.\r
+///\r
+/// This function is thread-safe and allows for multiple application threads to target\r
+/// their processing to the same displayed frame.\r
+///\r
+/// In the event that prediction fails due to various reasons (e.g. the display being off
+/// or app has yet to present any frames), the return value will be current CPU time.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] frameIndex Identifies the frame the caller wishes to target.\r
+/// A value of zero returns the next frame index.\r
+/// \return Returns the absolute frame midpoint time for the given frameIndex.\r
+/// \see ovr_GetTimeInSeconds\r
+///\r
+OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex);\r
+\r
+/// Returns global, absolute high-resolution time in seconds.\r
+///\r
+/// The time frame of reference for this function is not specified and should not be\r
+/// depended upon.\r
+///\r
+/// \return Returns seconds as a floating point value.\r
+/// \see ovrPoseStatef, ovrFrameTiming\r
+///\r
+OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds();\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Performance HUD enables the HMD user to see information critical to
+/// the real-time operation of the VR application such as latency timing,
+/// and CPU & GPU performance metrics
+///
+/// App can toggle performance HUD modes as such:
+/// \code{.cpp}
+/// ovrPerfHudMode PerfHudMode = ovrPerfHud_LatencyTiming;
+/// ovr_SetInt(session, OVR_PERF_HUD_MODE, (int)PerfHudMode);
+/// \endcode
+///
+typedef enum ovrPerfHudMode_ {
+ ovrPerfHud_Off = 0, ///< Turns off the performance HUD
+ ovrPerfHud_PerfSummary = 1, ///< Shows performance summary and headroom
+ ovrPerfHud_LatencyTiming = 2, ///< Shows latency related timing info
+ ovrPerfHud_AppRenderTiming = 3, ///< Shows render timing info for application
+ ovrPerfHud_CompRenderTiming = 4, ///< Shows render timing info for OVR compositor
+ // NOTE: values below are not in declaration order; presumably AswStats (6) was
+ // added after VersionInfo had taken 5. Do not renumber — values are ABI.
+ ovrPerfHud_AswStats = 6, ///< Shows Async Spacewarp-specific info
+ ovrPerfHud_VersionInfo = 5, ///< Shows SDK & HMD version Info
+ ovrPerfHud_Count = 7, ///< \internal Count of enumerated elements.
+ ovrPerfHud_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrPerfHudMode;
+\r
+/// Layer HUD enables the HMD user to see information about a layer
+///
+/// App can toggle layer HUD modes as such:
+/// \code{.cpp}
+/// ovrLayerHudMode LayerHudMode = ovrLayerHud_Info;
+/// ovr_SetInt(session, OVR_LAYER_HUD_MODE, (int)LayerHudMode);
+/// \endcode
+///
+typedef enum ovrLayerHudMode_ {
+ ovrLayerHud_Off = 0, ///< Turns off the layer HUD
+ ovrLayerHud_Info = 1, ///< Shows info about a specific layer
+ ovrLayerHud_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrLayerHudMode;
+\r
+///@}\r
+\r
+/// Debug HUD is provided to help developers gauge and debug the fidelity of their app's\r
+/// stereo rendering characteristics. Using the provided quad and crosshair guides,\r
+/// the developer can verify various aspects such as VR tracking units (e.g. meters),\r
+/// stereo camera-parallax properties (e.g. making sure objects at infinity are rendered\r
+/// with the proper separation), measuring VR geometry sizes and distances and more.\r
+///\r
+/// App can toggle the debug HUD modes as such:\r
+/// \code{.cpp}\r
+/// ovrDebugHudStereoMode DebugHudMode = ovrDebugHudStereo_QuadWithCrosshair;\r
+/// ovr_SetInt(session, OVR_DEBUG_HUD_STEREO_MODE, (int)DebugHudMode);\r
+/// \endcode\r
+///\r
+/// The app can modify the visual properties of the stereo guide (i.e. quad, crosshair)\r
+/// using the ovr_SetFloatArray function. For a list of tweakable properties,\r
+/// see the OVR_DEBUG_HUD_STEREO_GUIDE_* keys in the OVR_CAPI_Keys.h header file.\r
+typedef enum ovrDebugHudStereoMode_ {\r
+ /// Turns off the Stereo Debug HUD.\r
+ ovrDebugHudStereo_Off = 0,\r
+\r
+ /// Renders Quad in world for Stereo Debugging.\r
+ ovrDebugHudStereo_Quad = 1,\r
+\r
+ /// Renders Quad+crosshair in world for Stereo Debugging.\r
+ ovrDebugHudStereo_QuadWithCrosshair = 2,\r
+\r
+ /// Renders screen-space crosshair at infinity for Stereo Debugging.\r
+ ovrDebugHudStereo_CrosshairAtInfinity = 3,\r
+\r
+ /// \internal Count of enumerated elements.\r
+ ovrDebugHudStereo_Count,\r
+\r
+ ovrDebugHudStereo_EnumSize = 0x7fffffff ///< \internal Force type int32_t\r
+} ovrDebugHudStereoMode;\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+// -----------------------------------------------------------------------------------\r
+/// @name Property Access\r
+///\r
+/// These functions read and write OVR properties. Supported properties\r
+/// are defined in OVR_CAPI_Keys.h\r
+///\r
+//@{\r
+\r
+/// Reads a boolean property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid for only the call.\r
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.\r
+/// \return Returns the property interpreted as a boolean value. Returns defaultVal if\r
+/// the property doesn't exist.\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal);\r
+\r
+/// Writes or creates a boolean property.\r
+/// If the property wasn't previously a boolean property, it is changed to a boolean property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] value The value to write.\r
+/// \return Returns true if successful, otherwise false. A false result should only occur if the\r
+/// property\r
+/// name is empty or if the property is read-only.\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value);\r
+\r
+/// Reads an integer property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.\r
+/// \return Returns the property interpreted as an integer value. Returns defaultVal if\r
+/// the property doesn't exist.\r
+OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal);\r
+\r
+/// Writes or creates an integer property.\r
+///\r
+/// If the property wasn't previously an integer property, it is changed to an integer property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] value The value to write.\r
+/// \return Returns true if successful, otherwise false. A false result should only occur if the\r
+/// property name is empty or if the property is read-only.\r
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value);\r
+\r
+/// Reads a float property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.\r
+/// \return Returns the property interpreted as a float value. Returns defaultVal if\r
+/// the property doesn't exist.\r
+OVR_PUBLIC_FUNCTION(float)\r
+ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal);\r
+\r
+/// Writes or creates a float property.\r
+/// If the property wasn't previously a float property, it's changed to a float property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] value The value to write.\r
+/// \return Returns true if successful, otherwise false. A false result should only occur if the\r
+/// property name is empty or if the property is read-only.\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetFloat(ovrSession session, const char* propertyName, float value);\r
+\r
+/// Reads a float array property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] values An array of float to write to.\r
+/// \param[in] valuesCapacity Specifies the maximum number of elements to write to the values array.\r
+/// \return Returns the number of elements read, or 0 if property doesn't exist or is empty.\r
+OVR_PUBLIC_FUNCTION(unsigned int)\r
+ovr_GetFloatArray(\r
+ ovrSession session,\r
+ const char* propertyName,\r
+ float values[],\r
+ unsigned int valuesCapacity);\r
+\r
+/// Writes or creates a float array property.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] values An array of float to write from.\r
+/// \param[in] valuesSize Specifies the number of elements to write.\r
+/// \return Returns true if successful, otherwise false. A false result should only occur if the\r
+/// property name is empty or if the property is read-only.\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetFloatArray(\r
+ ovrSession session,\r
+ const char* propertyName,\r
+ const float values[],\r
+ unsigned int valuesSize);\r
+\r
+/// Reads a string property.\r
+/// Strings are UTF8-encoded and null-terminated.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.\r
+/// \return Returns the string property if it exists. Otherwise returns defaultVal, which can be\r
+/// specified as NULL. The return memory is guaranteed to be valid until next call to\r
+/// ovr_GetString or until the session is destroyed, whichever occurs first.\r
+OVR_PUBLIC_FUNCTION(const char*)\r
+ovr_GetString(ovrSession session, const char* propertyName, const char* defaultVal);\r
+\r
+/// Writes or creates a string property.\r
+/// Strings are UTF8-encoded and null-terminated.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.\r
+/// \param[in] value The string property, which only needs to be valid for the duration of the call.\r
+/// \return Returns true if successful, otherwise false. A false result should only occur if the\r
+/// property name is empty or if the property is read-only.\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetString(ovrSession session, const char* propertyName, const char* value);\r
+\r
+///@}\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+#ifdef __cplusplus\r
+} // extern "C"\r
+#endif\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning(pop)\r
+#endif\r
+\r
+/// @cond DoxygenIgnore\r
+\r
+\r
+OVR_STATIC_ASSERT(\r
+ sizeof(ovrTextureSwapChainDesc) == 10 * 4,\r
+ "ovrTextureSwapChainDesc size mismatch");\r
+\r
+// -----------------------------------------------------------------------------------\r
+// ***** Backward compatibility #includes\r
+//\r
+// This is at the bottom of this file because the following is dependent on the\r
+// declarations above.\r
+\r
+#if !defined(OVR_CAPI_NO_UTILS)\r
+#include "Extras/OVR_CAPI_Util.h"\r
+#endif\r
+\r
+/// @endcond\r
+\r
+#endif // OVR_CAPI_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_Audio.h\r
+ \brief CAPI audio functions.\r
+ \copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_Audio_h\r
+#define OVR_CAPI_Audio_h\r
+\r
+#ifdef _WIN32\r
+// Prevents <Windows.h> from defining min() and max() macro symbols.\r
+#ifndef NOMINMAX\r
+#define NOMINMAX\r
+#endif\r
+#include <windows.h>\r
+#include "OVR_CAPI.h"\r
+#define OVR_AUDIO_MAX_DEVICE_STR_SIZE 128\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Gets the ID of the preferred VR audio output device.\r
+///\r
+/// \param[out] deviceOutId The ID of the user's preferred VR audio device to use,\r
+/// which will be valid upon a successful return value, else it will be WAVE_MAPPER.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(UINT* deviceOutId);\r
+\r
+/// Gets the ID of the preferred VR audio input device.\r
+///\r
+/// \param[out] deviceInId The ID of the user's preferred VR audio device to use,\r
+/// which will be valid upon a successful return value, else it will be WAVE_MAPPER.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(UINT* deviceInId);\r
+\r
+/// Gets the GUID of the preferred VR audio device as a string.\r
+///\r
+/// \param[out] deviceOutStrBuffer A buffer where the GUID string for the device will be copied to.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetAudioDeviceOutGuidStr(WCHAR deviceOutStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);\r
+\r
+/// Gets the GUID of the preferred VR audio device.\r
+///\r
+/// \param[out] deviceOutGuid The GUID of the user's preferred VR audio device to use,\r
+/// which will be valid upon a successful return value, else it will be NULL.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid);\r
+\r
+/// Gets the GUID of the preferred VR microphone device as a string.\r
+///\r
+/// \param[out] deviceInStrBuffer A buffer where the GUID string for the device will be copied to.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetAudioDeviceInGuidStr(WCHAR deviceInStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);\r
+\r
+/// Gets the GUID of the preferred VR microphone device.\r
+///\r
+/// \param[out] deviceInGuid The GUID of the user's preferred VR audio device to use,\r
+/// which will be valid upon a successful return value, else it will be NULL.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid);\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+#endif // _WIN32\r
+\r
+#endif // OVR_CAPI_Audio_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_D3D.h\r
+ \brief D3D specific structures used by the CAPI interface.\r
+ \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_D3D_h\r
+#define OVR_CAPI_D3D_h\r
+\r
+#include "OVR_CAPI.h"\r
+#include "OVR_Version.h"\r
+\r
+\r
+#if defined(_WIN32)\r
+#include <unknwn.h>\r
+#include <guiddef.h>\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Direct3D Specific\r
+\r
+/// Create Texture Swap Chain suitable for use with Direct3D 11 and 12.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with\r
+/// or the D3D12CommandQueue which must be the same one the application renders\r
+/// to the eye textures with.\r
+/// \param[in] desc Specifies requested texture properties. See notes for more info\r
+/// about texture format.\r
+/// \param[in] bindFlags Specifies what ovrTextureBindFlags the application requires\r
+/// for this texture chain.\r
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will\r
+/// be valid upon a successful return value, else it will be NULL.\r
+/// This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain\r
+/// before destroying the session with ovr_Destroy.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \note The texture format provided in \a desc should be thought of as the format the\r
+/// distortion-compositor will use for the ShaderResourceView when reading the contents of\r
+/// the texture. To that end, it is highly recommended that the application requests texture\r
+/// swapchain formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB)\r
+/// as the compositor does sRGB-correct rendering. As such, the compositor relies on the\r
+/// GPU's hardware sampler to do the sRGB-to-linear conversion. If the application still\r
+/// prefers to render to a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) while handling the\r
+/// linear-to-gamma conversion via HLSL code, then the application must still request the\r
+/// corresponding sRGB format and also use the \a ovrTextureMisc_DX_Typeless flag in the\r
+/// ovrTextureSwapChainDesc's Flag field. This will allow the application to create\r
+/// a RenderTargetView that is the desired linear format while the compositor continues to\r
+/// treat it as sRGB. Failure to do so will cause the compositor to apply unexpected gamma\r
+/// conversions leading to gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless\r
+/// flag for depth buffer formats (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always\r
+/// converted to be typeless.\r
+///\r
+/// \see ovr_GetTextureSwapChainLength\r
+/// \see ovr_GetTextureSwapChainCurrentIndex\r
+/// \see ovr_GetTextureSwapChainDesc\r
+/// \see ovr_GetTextureSwapChainBufferDX\r
+/// \see ovr_DestroyTextureSwapChain\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateTextureSwapChainDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrTextureSwapChainDesc* desc,\r
+ ovrTextureSwapChain* out_TextureSwapChain);\r
+\r
+/// Get a specific buffer within the chain as any compatible COM interface (similar to\r
+/// QueryInterface)\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies an ovrTextureSwapChain previously returned\r
+/// by ovr_CreateTextureSwapChainDX\r
+/// \param[in] index Specifies the index within the chain to retrieve.\r
+/// Must be between 0 and length (see ovr_GetTextureSwapChainLength),\r
+/// or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to\r
+/// GetTextureSwapChainCurrentIndex)\r
+/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for.\r
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovr_GetTextureSwapChainBufferDX(session, chain, 0, IID_ID3D11Texture2D, &d3d11Texture);\r
+/// ovr_GetTextureSwapChainBufferDX(session, chain, 1, IID_PPV_ARGS(&dxgiResource));\r
+/// \endcode\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainBufferDX(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int index,\r
+ IID iid,\r
+ void** out_Buffer);\r
+\r
+/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this\r
+/// application.\r
+///\r
+/// A second call to ovr_CreateMirrorTextureWithOptionsDX for a given ovrSession before destroying\r
+/// the first one is not supported and will result in an error return.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with\r
+/// or the D3D12CommandQueue which must be the same one the application renders to\r
+/// the textures with.\r
+/// \param[in] desc Specifies requested texture properties.\r
+/// See notes for more info about texture format.\r
+/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be valid upon a\r
+/// successful return value, else it will be NULL.\r
+/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before\r
+/// destroying the session with ovr_Destroy.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \note The texture format provided in \a desc should be thought of as the format the compositor\r
+/// will use for the RenderTargetView when writing into mirror texture. To that end, it is\r
+/// highly recommended that the application requests a mirror texture format that is\r
+/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct\r
+/// rendering. If however the application wants to still read the mirror texture as a linear\r
+/// format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion in\r
+/// HLSL code, then it is recommended the application still requests an sRGB format and also\r
+/// use the \a ovrTextureMisc_DX_Typeless flag in the ovrMirrorTextureDesc's Flags field.\r
+/// This will allow the application to bind a ShaderResourceView that is a linear format\r
+/// while the compositor continues to treat is as sRGB. Failure to do so will cause the\r
+/// compositor to apply unexpected gamma conversions leading to gamma-curve artifacts.\r
+///\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ovrMirrorTexture mirrorTexture = nullptr;\r
+/// ovrMirrorTextureDesc mirrorDesc = {};\r
+/// mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;\r
+/// mirrorDesc.Width = mirrorWindowWidth;\r
+/// mirrorDesc.Height = mirrorWindowHeight;\r
+/// ovrResult result = ovr_CreateMirrorTextureWithOptionsDX(session, d3d11Device,\r
+/// &mirrorDesc, &mirrorTexture);\r
+/// [...]\r
+/// // Destroy the texture when done with it.\r
+/// ovr_DestroyMirrorTexture(session, mirrorTexture);\r
+/// mirrorTexture = nullptr;\r
+/// \endcode\r
+///\r
+/// \see ovr_GetMirrorTextureBufferDX\r
+/// \see ovr_DestroyMirrorTexture\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureWithOptionsDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* out_MirrorTexture);\r
+\r
+/// Deprecated. Use ovr_CreateMirrorTextureWithOptionsDX instead\r
+///\r
+/// Same as ovr_CreateMirrorTextureWithOptionsDX except doesn't use ovrMirrorOptions flags as part\r
+/// of ovrMirrorTextureDesc's MirrorOptions field, and defaults to ovrMirrorOption_PostDistortion\r
+///\r
+/// \see ovrMirrorOptions, ovr_CreateMirrorTextureWithOptionsDX\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* out_MirrorTexture);\r
+\r
+/// Get the underlying buffer as any compatible COM interface (similar to QueryInterface)\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned\r
+/// by ovr_CreateMirrorTextureWithOptionsDX\r
+/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for.\r
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// ID3D11Texture2D* d3d11Texture = nullptr;\r
+/// ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&d3d11Texture));\r
+/// d3d11DeviceContext->CopyResource(d3d11TextureBackBuffer, d3d11Texture);\r
+/// d3d11Texture->Release();\r
+/// dxgiSwapChain->Present(0, 0);\r
+/// \endcode\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetMirrorTextureBufferDX(\r
+ ovrSession session,\r
+ ovrMirrorTexture mirrorTexture,\r
+ IID iid,\r
+ void** out_Buffer);\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+#endif // _WIN32\r
+\r
+#endif // OVR_CAPI_D3D_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_GL.h\r
+ \brief OpenGL-specific structures used by the CAPI interface.\r
+ \copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_GL_h\r
+#define OVR_CAPI_GL_h\r
+\r
+#include "OVR_CAPI.h"\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+/// Creates a TextureSwapChain suitable for use with OpenGL.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] desc Specifies the requested texture properties.\r
+/// See notes for more info about texture format.\r
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain,\r
+/// which will be valid upon a successful return value, else it will be NULL.\r
+/// This texture swap chain must be eventually destroyed via\r
+/// ovr_DestroyTextureSwapChain before destroying the session with ovr_Destroy.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \note The \a format provided should be thought of as the format the distortion compositor will\r
+/// use when reading the contents of the texture. To that end, it is highly recommended\r
+/// that the application requests texture swap chain formats that are in sRGB-space\r
+/// (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the distortion compositor does sRGB-correct\r
+/// rendering. Furthermore, the app should then make sure "glEnable(GL_FRAMEBUFFER_SRGB);"\r
+/// is called before rendering into these textures. Even though it is not recommended,\r
+/// if the application would like to treat the texture as a linear format and do\r
+/// linear-to-gamma conversion in GLSL, then the application can avoid\r
+/// calling "glEnable(GL_FRAMEBUFFER_SRGB);", but should still pass in an sRGB variant for\r
+/// the \a format. Failure to do so will cause the distortion compositor to apply incorrect\r
+/// gamma conversions leading to gamma-curve artifacts.\r
+///\r
+/// \see ovr_GetTextureSwapChainLength\r
+/// \see ovr_GetTextureSwapChainCurrentIndex\r
+/// \see ovr_GetTextureSwapChainDesc\r
+/// \see ovr_GetTextureSwapChainBufferGL\r
+/// \see ovr_DestroyTextureSwapChain\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateTextureSwapChainGL(\r
+ ovrSession session,\r
+ const ovrTextureSwapChainDesc* desc,\r
+ ovrTextureSwapChain* out_TextureSwapChain);\r
+\r
+/// Get a specific buffer within the chain as a GL texture name\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] chain Specifies an ovrTextureSwapChain previously returned\r
+/// by ovr_CreateTextureSwapChainGL\r
+/// \param[in] index Specifies the index within the chain to retrieve.\r
+/// Must be between 0 and length (see ovr_GetTextureSwapChainLength)\r
+/// or may pass -1 to get the buffer at the CurrentIndex location.\r
+/// (Saving a call to GetTextureSwapChainCurrentIndex)\r
+/// \param[out] out_TexId Returns the GL texture object name associated with\r
+/// the specific index requested\r
+///\r
+/// \return Returns an ovrResult indicating success or failure.\r
+/// In the case of failure, use ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainBufferGL(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int index,\r
+ unsigned int* out_TexId);\r
+\r
+/// Creates a Mirror Texture which is auto-refreshed to mirror Rift contents produced by this\r
+/// application.\r
+///\r
+/// A second call to ovr_CreateMirrorTextureWithOptionsGL for a given ovrSession before destroying\r
+/// the first one is not supported and will result in an error return.\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] desc Specifies the requested mirror texture description.\r
+/// \param[out] out_MirrorTexture Specifies the created ovrMirrorTexture, which will be\r
+/// valid upon a successful return value, else it will be NULL.\r
+/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before\r
+/// destroying the session with ovr_Destroy.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+/// \note The \a format provided should be thought of as the format the distortion compositor will\r
+/// use when writing into the mirror texture. It is highly recommended that mirror textures\r
+/// are requested as sRGB formats because the distortion compositor does sRGB-correct\r
+/// rendering. If the application requests a non-sRGB format (e.g. R8G8B8A8_UNORM) as the\r
+/// mirror texture, then the application might have to apply a manual linear-to-gamma\r
+/// conversion when reading from the mirror texture. Failure to do so can result in\r
+/// incorrect gamma conversions leading to gamma-curve artifacts and color banding.\r
+///\r
+/// \see ovr_GetMirrorTextureBufferGL\r
+/// \see ovr_DestroyMirrorTexture\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureWithOptionsGL(\r
+ ovrSession session,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* out_MirrorTexture);\r
+\r
+/// Deprecated. Use ovr_CreateMirrorTextureWithOptionsGL instead\r
+///\r
+/// Same as ovr_CreateMirrorTextureWithOptionsGL except doesn't use ovrMirrorOptions flags as part\r
+/// of ovrMirrorTextureDesc's MirrorOptions field, and defaults to ovrMirrorOption_PostDistortion\r
+///\r
+/// \see ovrMirrorOptions, ovr_CreateMirrorTextureWithOptionsGL\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureGL(\r
+ ovrSession session,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* out_MirrorTexture);\r
+\r
+/// Get the underlying buffer as a GL texture name\r
+///\r
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.\r
+/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned\r
+/// by ovr_CreateMirrorTextureWithOptionsGL\r
+/// \param[out] out_TexId Specifies the GL texture object name associated with the mirror texture\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information.\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetMirrorTextureBufferGL(\r
+ ovrSession session,\r
+ ovrMirrorTexture mirrorTexture,\r
+ unsigned int* out_TexId);\r
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+#endif // OVR_CAPI_GL_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_Keys.h\r
+ \brief Keys for CAPI property function calls\r
+ \copyright Copyright 2015 Oculus VR, LLC All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_Keys_h\r
+#define OVR_CAPI_Keys_h\r
+\r
+#include "OVR_Version.h"\r
+\r
+\r
+\r
+#define OVR_KEY_USER "User" // string\r
+\r
+#define OVR_KEY_NAME "Name" // string\r
+\r
+#define OVR_KEY_GENDER "Gender" // string "Male", "Female", or "Unknown"\r
+#define OVR_DEFAULT_GENDER "Unknown"\r
+\r
+#define OVR_KEY_PLAYER_HEIGHT "PlayerHeight" // float meters\r
+#define OVR_DEFAULT_PLAYER_HEIGHT 1.778f\r
+\r
+#define OVR_KEY_EYE_HEIGHT "EyeHeight" // float meters\r
+#define OVR_DEFAULT_EYE_HEIGHT 1.675f\r
+\r
+#define OVR_KEY_NECK_TO_EYE_DISTANCE "NeckEyeDistance" // float[2] meters\r
+#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL 0.0805f\r
+#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL 0.075f\r
+\r
+#define OVR_KEY_EYE_TO_NOSE_DISTANCE "EyeToNoseDist" // float[2] meters\r
+\r
+\r
+\r
+#define OVR_PERF_HUD_MODE "PerfHudMode" // int, allowed values are defined in enum ovrPerfHudMode\r
+\r
+#define OVR_LAYER_HUD_MODE "LayerHudMode" // int, allowed values are defined in enum ovrLayerHudMode\r
+#define OVR_LAYER_HUD_CURRENT_LAYER "LayerHudCurrentLayer" // int, The layer to show\r
+#define OVR_LAYER_HUD_SHOW_ALL_LAYERS "LayerHudShowAll" // bool, Hide other layers when hud enabled\r
+\r
+#define OVR_DEBUG_HUD_STEREO_MODE "DebugHudStereoMode" // int, see enum ovrDebugHudStereoMode\r
+#define OVR_DEBUG_HUD_STEREO_GUIDE_INFO_ENABLE "DebugHudStereoGuideInfoEnable" // bool\r
+#define OVR_DEBUG_HUD_STEREO_GUIDE_SIZE "DebugHudStereoGuideSize2f" // float[2]\r
+#define OVR_DEBUG_HUD_STEREO_GUIDE_POSITION "DebugHudStereoGuidePosition3f" // float[3]\r
+#define OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL "DebugHudStereoGuideYawPitchRoll3f" // float[3]\r
+#define OVR_DEBUG_HUD_STEREO_GUIDE_COLOR "DebugHudStereoGuideColor4f" // float[4]\r
+\r
+#endif // OVR_CAPI_Keys_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_Vk.h\r
+ \brief Vulkan specific structures used by the CAPI interface.\r
+ \copyright Copyright 2014-2017 Oculus VR, LLC All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_Vk_h\r
+#define OVR_CAPI_Vk_h\r
+\r
+#include "OVR_CAPI.h"\r
+#include "OVR_Version.h"\r
+\r
+\r
+#if !defined(OVR_EXPORTING_CAPI)\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Vulkan Specific\r
+\r
+/// Get a list of Vulkan vkInstance extensions required for VR.\r
+///\r
+/// Returns a list of strings delimited by a single space identifying Vulkan extensions that must\r
+/// be enabled in order for the VR runtime to support Vulkan-based applications. The returned\r
+/// list reflects the current runtime version and the GPU the VR system is currently connected to.\r
+///\r
+/// \param[in] luid Specifies the luid for the relevant GPU, which is returned from ovr_Create.\r
+/// \param[in] extensionNames is a character buffer which will receive a list of extension name\r
+/// strings, separated by a single space char between each extension.\r
+/// \param[in] inoutExtensionNamesSize indicates on input the capacity of extensionNames in chars.\r
+/// On output it returns the number of characters written to extensionNames,\r
+/// including the terminating 0 char. In the case of this function returning\r
+/// ovrError_InsufficientArraySize, the required inoutExtensionNamesSize is returned.\r
+///\r
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use\r
+/// ovr_GetLastErrorInfo to get more information. Returns ovrError_InsufficientArraySize in\r
+/// the case that inoutExtensionNameSize didn't have enough space, in which case\r
+/// inoutExtensionNameSize will return the required inoutExtensionNamesSize.\r
+///\r
+/// <b>Example code</b>\r
+/// \code{.cpp}\r
+/// char extensionNames[4096];\r
+/// uint32_t extensionNamesSize = sizeof(extensionNames);\r
+/// ovr_GetInstanceExtensionsVk(luid, extensionNames, &extensionNamesSize);\r
+///\r
+/// uint32_t extensionCount = 0;\r
+/// const char* extensionNamePtrs[256];\r
+/// for(const char* p = extensionNames; *p; ++p) {\r
+/// if((p == extensionNames) || (p[-1] == ' ')) {\r
+/// extensionNamePtrs[extensionCount++] = p;\r
+/// if (p[-1] == ' ')\r
+/// p[-1] = '\0';\r
+/// }\r
+/// }\r
+///\r
+/// VkInstanceCreateInfo info = { ... };\r
+/// info.enabledExtensionCount = extensionCount;\r
+/// info.ppEnabledExtensionNames = extensionNamePtrs;\r
+/// [...]\r
+/// \endcode\r
+///\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetInstanceExtensionsVk(\r
+ ovrGraphicsLuid luid,\r
+ char* extensionNames,\r
+ uint32_t* inoutExtensionNamesSize);\r
+\r
/// Get a list of Vulkan vkDevice extensions required for VR.
///
/// Returns a list of strings delimited by a single space identifying Vulkan extensions that must
/// be enabled in order for the VR runtime to support Vulkan-based applications. The returned
/// list reflects the current runtime version and the GPU the VR system is currently connected to.
///
/// \param[in] luid Specifies the luid for the relevant GPU, which is returned from ovr_Create.
/// \param[out] extensionNames is a character buffer which will receive a list of extension name
/// strings, separated by a single space char between each extension.
/// \param[in,out] inoutExtensionNamesSize indicates on input the capacity of extensionNames in
/// chars. On output it returns the number of characters written to extensionNames,
/// including the terminating 0 char. In the case of this function returning
/// ovrError_InsufficientArraySize, the required inoutExtensionNamesSize is returned.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information. Returns ovrError_InsufficientArraySize in
/// the case that inoutExtensionNameSize didn't have enough space, in which case
/// inoutExtensionNameSize will return the required inoutExtensionNamesSize.
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GetDeviceExtensionsVk(
 ovrGraphicsLuid luid,
 char* extensionNames,
 uint32_t* inoutExtensionNamesSize);
+\r
/// Find the VkPhysicalDevice matching an ovrGraphicsLuid.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] luid Specifies the luid returned from ovr_Create.
/// \param[in] instance Specifies a VkInstance to search for matching LUIDs in.
/// \param[out] out_physicalDevice Returns the VkPhysicalDevice matching the instance and luid.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note This function enumerates the current physical devices and returns the one matching the
/// luid. It must be called at least once prior to any ovr_CreateTextureSwapChainVk or
/// ovr_CreateMirrorTextureWithOptionsVk calls, and the instance must remain valid for the lifetime
/// of the returned objects. It is assumed the VkDevice created by the application will be for the
/// returned physical device.
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GetSessionPhysicalDeviceVk(
 ovrSession session,
 ovrGraphicsLuid luid,
 VkInstance instance,
 VkPhysicalDevice* out_physicalDevice);
+\r
/// Select the VkQueue to block on until rendering is complete.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] queue Specifies a VkQueue to add a VkFence operation to and wait on.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The queue may be changed at any time but only the value at the time ovr_SubmitFrame
/// is called will be used. ovr_SetSynchronizationQueueVk must be called with a valid VkQueue
/// created on the same VkDevice the texture sets were created on prior to the first call to
/// ovr_SubmitFrame. An internally created VkFence object will be signalled by the completion
/// of operations on queue and waited on to synchronize the VR compositor.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetSynchronizationQueueVk(ovrSession session, VkQueue queue);
// Backwards compatibility for the original misspelled ("Synchonization") version.
#define ovr_SetSynchonizationQueueVk ovr_SetSynchronizationQueueVk
// Define OVR_PREVIEW_DEPRECATION to generate warnings for upcoming API deprecations
#if defined(OVR_PREVIEW_DEPRECATION)
#pragma deprecated("ovr_SetSynchonizationQueueVk")
#endif
+\r
/// Create Texture Swap Chain suitable for use with Vulkan
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] device Specifies the application's VkDevice to create resources with.
/// \param[in] desc Specifies requested texture properties. See notes for more info
/// about texture format.
/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid
/// upon a successful return value, else it will be NULL.
/// This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain
/// before destroying the session with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The texture format provided in \a desc should be thought of as the format the
/// distortion-compositor will use for the ShaderResourceView when reading the contents
/// of the texture. To that end, it is highly recommended that the application
/// requests texture swapchain formats that are in sRGB-space
/// (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct rendering.
/// As such, the compositor relies on the GPU's hardware sampler to do the sRGB-to-linear
/// conversion. If the application still prefers to render to a linear format (e.g.
/// OVR_FORMAT_R8G8B8A8_UNORM) while handling the linear-to-gamma conversion via
/// SPIRV code, then the application must still request the corresponding sRGB format and
/// also use the \a ovrTextureMisc_DX_Typeless flag in the ovrTextureSwapChainDesc's
/// Flags field. This will allow the application to create a RenderTargetView that is the
/// desired linear format while the compositor continues to treat it as sRGB. Failure to
/// do so will cause the compositor to apply unexpected gamma conversions leading to
/// gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless flag for depth buffer formats
/// (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always
/// converted to be typeless.
///
/// \see ovr_GetTextureSwapChainLength
/// \see ovr_GetTextureSwapChainCurrentIndex
/// \see ovr_GetTextureSwapChainDesc
/// \see ovr_GetTextureSwapChainBufferVk
/// \see ovr_DestroyTextureSwapChain
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_CreateTextureSwapChainVk(
 ovrSession session,
 VkDevice device,
 const ovrTextureSwapChainDesc* desc,
 ovrTextureSwapChain* out_TextureSwapChain);
+\r
/// Get a specific VkImage within the texture swap chain.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] chain Specifies an ovrTextureSwapChain previously returned by
/// ovr_CreateTextureSwapChainVk
/// \param[in] index Specifies the index within the chain to retrieve.
/// Must be between 0 and length (see ovr_GetTextureSwapChainLength),
/// or may pass -1 to get the buffer at the CurrentIndex location (saving a
/// call to GetTextureSwapChainCurrentIndex).
/// \param[out] out_Image Returns the VkImage retrieved.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GetTextureSwapChainBufferVk(
 ovrSession session,
 ovrTextureSwapChain chain,
 int index,
 VkImage* out_Image);
+\r
/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this
/// application.
///
/// A second call to ovr_CreateMirrorTextureWithOptionsVk for a given ovrSession before destroying
/// the first one is not supported and will result in an error return.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] device Specifies the VkDevice to create resources with.
/// \param[in] desc Specifies requested texture properties. See notes for more info
/// about texture format.
/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be
/// valid upon a successful return value, else it will be NULL.
/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before
/// destroying the session with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The texture format provided in \a desc should be thought of as the format the
/// compositor will use for the VkImageView when writing into mirror texture. To that end,
/// it is highly recommended that the application requests a mirror texture format that is
/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct
/// rendering. If however the application wants to still read the mirror texture as a
/// linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion
/// in SPIRV code, then it is recommended the application still requests an sRGB format and
/// also use the \a ovrTextureMisc_DX_Typeless flag in the ovrMirrorTextureDesc's
/// Flags field. This will allow the application to bind a ShaderResourceView that is a
/// linear format while the compositor continues to treat it as sRGB. Failure to do so will
/// cause the compositor to apply unexpected gamma conversions leading to
/// gamma-curve artifacts.
///
/// <b>Example code</b>
/// \code{.cpp}
/// ovrMirrorTexture mirrorTexture = nullptr;
/// ovrMirrorTextureDesc mirrorDesc = {};
/// mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
/// mirrorDesc.Width = mirrorWindowWidth;
/// mirrorDesc.Height = mirrorWindowHeight;
/// ovrResult result = ovr_CreateMirrorTextureWithOptionsVk(session, vkDevice, &mirrorDesc,
/// &mirrorTexture);
/// [...]
/// // Destroy the texture when done with it.
/// ovr_DestroyMirrorTexture(session, mirrorTexture);
/// mirrorTexture = nullptr;
/// \endcode
///
/// \see ovr_GetMirrorTextureBufferVk
/// \see ovr_DestroyMirrorTexture
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_CreateMirrorTextureWithOptionsVk(
 ovrSession session,
 VkDevice device,
 const ovrMirrorTextureDesc* desc,
 ovrMirrorTexture* out_MirrorTexture);
+\r
/// Get the underlying mirror VkImage.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned by
/// ovr_CreateMirrorTextureWithOptionsVk
/// \param[out] out_Image Returns the VkImage pointer retrieved.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// <b>Example code</b>
/// \code{.cpp}
/// VkImage mirrorImage = VK_NULL_HANDLE;
/// ovr_GetMirrorTextureBufferVk(session, mirrorTexture, &mirrorImage);
/// ...
/// vkCmdBlitImage(commandBuffer, mirrorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
/// presentImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_LINEAR);
/// ...
/// vkQueuePresentKHR(queue, &presentInfo);
/// \endcode
///
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GetMirrorTextureBufferVk(
 ovrSession session,
 ovrMirrorTexture mirrorTexture,
 VkImage* out_Image);
+\r
+#endif // !defined(OVR_EXPORTING_CAPI)\r
+\r
+#endif // OVR_CAPI_Vk_h\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_ErrorCode.h\r
+ \brief This header provides LibOVR error code declarations.\r
+ \copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.\r
+ *************************************************************************************/\r
+\r
+#ifndef OVR_ErrorCode_h\r
+#define OVR_ErrorCode_h\r
+\r
+#include "OVR_Version.h"\r
+#include <stdint.h>\r
+\r
+\r
+\r
#ifndef OVR_RESULT_DEFINED
#define OVR_RESULT_DEFINED ///< Allows ovrResult to be independently defined.
/// API call results are represented at the highest level by a single ovrResult.
typedef int32_t ovrResult;
#endif

/// \brief Indicates if an ovrResult indicates success.
///
/// Some functions return additional successful values other than ovrSuccess and
/// require usage of this macro to indicate success.
///
#if !defined(OVR_SUCCESS)
// The argument is parenthesized so that compound expressions such as
// OVR_SUCCESS(a & b) evaluate as ((a & b) >= 0) rather than (a & (b >= 0)).
#define OVR_SUCCESS(result) ((result) >= 0)
#endif

/// \brief Indicates if an ovrResult indicates an unqualified success.
///
/// This is useful for indicating that the code intentionally wants to
/// check for result == ovrSuccess as opposed to OVR_SUCCESS(), which
/// checks for result >= ovrSuccess.
///
#if !defined(OVR_UNQUALIFIED_SUCCESS)
#define OVR_UNQUALIFIED_SUCCESS(result) ((result) == ovrSuccess)
#endif

/// \brief Indicates if an ovrResult indicates failure.
///
#if !defined(OVR_FAILURE)
#define OVR_FAILURE(result) (!OVR_SUCCESS(result))
#endif
+\r
// Success is a value greater than or equal to 0, while all error types are negative values.
#ifndef OVR_SUCCESS_DEFINED
#define OVR_SUCCESS_DEFINED ///< Allows ovrResult to be independently defined.
typedef enum ovrSuccessType_ {
 /// This is a general success result. Use OVR_SUCCESS to test for success.
 ovrSuccess = 0,
} ovrSuccessType;
#endif
+\r
// Public success types
// Success is a value greater than or equal to 0, while all error types are negative values.
typedef enum ovrSuccessTypes_ {
 /// Returned from a call to ovr_SubmitFrame. The call succeeded, but what the app
 /// rendered will not be visible on the HMD. Ideally the app should continue
 /// calling ovr_SubmitFrame, but not do any rendering. When the result becomes
 /// ovrSuccess, rendering should continue as usual.
 ovrSuccess_NotVisible = 1000,

 /// Boundary is invalid due to sensor change or was not setup.
 ovrSuccess_BoundaryInvalid = 1001,

 /// Device is not available for the requested operation.
 ovrSuccess_DeviceUnavailable = 1002,
} ovrSuccessTypes;
+\r
// Public error types
typedef enum ovrErrorType_ {
 /******************/
 /* General errors */
 /******************/

 /// Failure to allocate memory.
 ovrError_MemoryAllocationFailure = -1000,

 /// Invalid ovrSession parameter provided.
 ovrError_InvalidSession = -1002,

 /// The operation timed out.
 ovrError_Timeout = -1003,

 /// The system or component has not been initialized.
 ovrError_NotInitialized = -1004,

 /// Invalid parameter provided. See error info or log for details.
 ovrError_InvalidParameter = -1005,

 /// Generic service error. See error info or log for details.
 ovrError_ServiceError = -1006,

 /// The given HMD doesn't exist.
 ovrError_NoHmd = -1007,

 /// Function call is not supported on this hardware/software
 ovrError_Unsupported = -1009,

 /// Specified device type isn't available.
 ovrError_DeviceUnavailable = -1010,

 /// The headset was in an invalid orientation for the requested
 /// operation (e.g. vertically oriented during ovr_RecenterPose).
 ovrError_InvalidHeadsetOrientation = -1011,

 /// The client failed to call ovr_Destroy on an active session before calling ovr_Shutdown.
 /// Or the client crashed.
 ovrError_ClientSkippedDestroy = -1012,

 /// The client failed to call ovr_Shutdown or the client crashed.
 ovrError_ClientSkippedShutdown = -1013,

 /// The service watchdog discovered a deadlock.
 ovrError_ServiceDeadlockDetected = -1014,

 /// Function call is invalid for object's current state
 ovrError_InvalidOperation = -1015,

 /// Increase size of output array
 ovrError_InsufficientArraySize = -1016,

 /// There is not any external camera information stored by ovrServer.
 ovrError_NoExternalCameraInfo = -1017,

 /// Tracking is lost when ovr_GetDevicePoses() is called.
 ovrError_LostTracking = -1018,

 /// There was a problem initializing the external camera for capture
 ovrError_ExternalCameraInitializedFailed = -1019,

 /// There was a problem capturing external camera frames
 ovrError_ExternalCameraCaptureFailed = -1020,

 /// The external camera friendly name list and the external camera name list
 /// are not the fixed size(OVR_MAX_EXTERNAL_CAMERA_NAME_BUFFER_SIZE).
 ovrError_ExternalCameraNameListsBufferSize = -1021,

 /// The external camera friendly name list is not the same size as
 /// the external camera name list.
 /// NOTE: the "Mistmatch" spelling is part of the shipped public identifier
 /// and must be preserved for source compatibility.
 ovrError_ExternalCameraNameListsMistmatch = -1022,

 /// The external camera property has not been sent to OVRServer
 /// when the user tries to open the camera.
 ovrError_ExternalCameraNotCalibrated = -1023,

 /// The external camera name is larger than OVR_EXTERNAL_CAMERA_NAME_SIZE-1
 ovrError_ExternalCameraNameWrongSize = -1024,

 /*************************************************/
 /* Audio error range, reserved for Audio errors. */
 /*************************************************/

 /// Failure to find the specified audio device.
 ovrError_AudioDeviceNotFound = -2001,

 /// Generic COM error.
 ovrError_AudioComError = -2002,

 /**************************/
 /* Initialization errors. */
 /**************************/

 /// Generic initialization error.
 ovrError_Initialize = -3000,

 /// Couldn't load LibOVRRT.
 ovrError_LibLoad = -3001,

 /// LibOVRRT version incompatibility.
 ovrError_LibVersion = -3002,

 /// Couldn't connect to the OVR Service.
 ovrError_ServiceConnection = -3003,

 /// OVR Service version incompatibility.
 ovrError_ServiceVersion = -3004,

 /// The operating system version is incompatible.
 ovrError_IncompatibleOS = -3005,

 /// Unable to initialize the HMD display.
 ovrError_DisplayInit = -3006,

 /// Unable to start the server. Is it already running?
 ovrError_ServerStart = -3007,

 /// Attempting to re-initialize with a different version.
 ovrError_Reinitialization = -3008,

 /// Chosen rendering adapters between client and service do not match
 ovrError_MismatchedAdapters = -3009,

 /// Calling application has leaked resources
 ovrError_LeakingResources = -3010,

 /// Client version too old to connect to service
 ovrError_ClientVersion = -3011,

 /// The operating system is out of date.
 ovrError_OutOfDateOS = -3012,

 /// The graphics driver is out of date.
 ovrError_OutOfDateGfxDriver = -3013,

 /// The graphics hardware is not supported
 ovrError_IncompatibleGPU = -3014,

 /// No valid VR display system found.
 ovrError_NoValidVRDisplaySystem = -3015,

 /// Feature or API is obsolete and no longer supported.
 ovrError_Obsolete = -3016,

 /// No supported VR display system found, but disabled or driverless adapter found.
 ovrError_DisabledOrDefaultAdapter = -3017,

 /// The system is using hybrid graphics (Optimus, etc...), which is not supported.
 ovrError_HybridGraphicsNotSupported = -3018,

 /// Initialization of the DisplayManager failed.
 ovrError_DisplayManagerInit = -3019,

 /// Failed to get the interface for an attached tracker
 ovrError_TrackerDriverInit = -3020,

 /// LibOVRRT signature check failure.
 ovrError_LibSignCheck = -3021,

 /// LibOVRRT path failure.
 ovrError_LibPath = -3022,

 /// LibOVRRT symbol resolution failure.
 ovrError_LibSymbols = -3023,

 /// Failed to connect to the service because remote connections to the service are not allowed.
 ovrError_RemoteSession = -3024,

 /// Vulkan initialization error.
 ovrError_InitializeVulkan = -3025,

 /// The graphics driver is black-listed.
 ovrError_BlacklistedGfxDriver = -3026,

 /********************/
 /* Rendering errors */
 /********************/

 /// In the event of a system-wide graphics reset or cable unplug this is returned to the app.
 ovrError_DisplayLost = -6000,

 /// ovr_CommitTextureSwapChain was called too many times on a texture swapchain without
 /// calling submit to use the chain.
 ovrError_TextureSwapChainFull = -6001,

 /// The ovrTextureSwapChain is in an incomplete or inconsistent state.
 /// Ensure ovr_CommitTextureSwapChain was called at least once first.
 ovrError_TextureSwapChainInvalid = -6002,

 /// Graphics device has been reset (TDR, etc...)
 ovrError_GraphicsDeviceReset = -6003,

 /// HMD removed from the display adapter
 ovrError_DisplayRemoved = -6004,

 /// Content protection is not available for the display.
 ovrError_ContentProtectionNotAvailable = -6005,

 /// Application declared itself as an invisible type and is not allowed to submit frames.
 ovrError_ApplicationInvisible = -6006,

 /// The given request is disallowed under the current conditions.
 ovrError_Disallowed = -6007,

 /// Display portion of HMD is plugged into an incompatible port (ex: IGP)
 ovrError_DisplayPluggedIncorrectly = -6008,

 /// Returned in the event a virtual display system reaches a display limit
 ovrError_DisplayLimitReached = -6009,

 /****************/
 /* Fatal errors */
 /****************/

 /// A runtime exception occurred. The application is required to shutdown LibOVR and
 /// re-initialize it before this error state will be cleared.
 ovrError_RuntimeException = -7000,

 /**********************/
 /* Calibration errors */
 /**********************/

 /// Result of a missing calibration block
 ovrError_NoCalibration = -9000,

 /// Result of an old calibration block
 ovrError_OldVersion = -9001,

 /// Result of a bad calibration block due to lengths
 ovrError_MisformattedBlock = -9002,

/****************/
/* Other errors */
/****************/


} ovrErrorType;
+\r
/// Provides information about the last error.
/// \see ovr_GetLastErrorInfo
typedef struct ovrErrorInfo_ {
 /// The ovrResult from the last API call that generated an error.
 ovrResult Result;

 /// A UTF8-encoded null-terminated English string describing the problem.
 /// The format of this string is subject to change in future versions.
 char ErrorString[512];
} ovrErrorInfo;
+\r
+#endif /* OVR_ErrorCode_h */\r
--- /dev/null
+/*************************************************************************************\r
+ \file OVR_Version.h\r
+ \brief This header provides LibOVR version identification.\r
+ \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+ *************************************************************************************/\r
+\r
+#ifndef OVR_Version_h\r
+#define OVR_Version_h\r
+\r
+\r
/// Conventional string-ification macro.
/// Two levels are required so that macro arguments are expanded before
/// being converted to a string literal.
#if !defined(OVR_STRINGIZE)
#define OVR_STRINGIZEIMPL(x) #x
#define OVR_STRINGIZE(x) OVR_STRINGIZEIMPL(x)
#endif

// Master version numbers
#define OVR_PRODUCT_VERSION 1 // Product version doesn't participate in semantic versioning.
#define OVR_MAJOR_VERSION 1 // If you change these values then you need to also make sure to change
// LibOVR/Projects/Windows/LibOVR.props in parallel.
#define OVR_MINOR_VERSION 24 //
#define OVR_PATCH_VERSION 0
#define OVR_BUILD_NUMBER 0

// This is the ((product * 100) + major) version of the service that the DLL is compatible with.
// When we backport changes to old versions of the DLL we update the old DLLs
// to move this version number up to the latest version.
// The DLL is responsible for checking that the service is the version it supports
// and returning an appropriate error message if it has not been made compatible.
#define OVR_DLL_COMPATIBLE_VERSION 101

// This is the minor version representing the minimum version an application can query with this
// SDK. Calls ovr_Initialize will fail if the application requests a version that is less than this.
#define OVR_MIN_REQUESTABLE_MINOR_VERSION 17

#define OVR_FEATURE_VERSION 0

/// "Major.Minor.Patch"
#if !defined(OVR_VERSION_STRING)
#define OVR_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION)
#endif

/// "Major.Minor.Patch.Build"
#if !defined(OVR_DETAILED_VERSION_STRING)
#define OVR_DETAILED_VERSION_STRING \
  OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION.OVR_BUILD_NUMBER)
#endif

/// \brief file description for version info
/// This appears in the user-visible file properties. It is intended to convey publicly
/// available additional information such as feature builds.
#if !defined(OVR_FILE_DESCRIPTION_STRING)
#if defined(_DEBUG)
#define OVR_FILE_DESCRIPTION_STRING "dev build debug"
#else
#define OVR_FILE_DESCRIPTION_STRING "dev build"
#endif
#endif
+\r
+#endif // OVR_Version_h\r
--- /dev/null
+
# Cross-compile libovr.dll for 32-bit Windows with MinGW-w64.
CXX = i686-w64-mingw32-g++
LINK = i686-w64-mingw32-g++ --shared
CXXFLAGS = -pipe -fPIC -O2 -fpermissive -DUNICODE=1 -D_UNICODE=1
# Append to CXXFLAGS when building against headers that lack NTSTATUS.
# (Previously written as a bare "//..." line, which GNU make parsed as an
# assignment to a variable literally named "//-DNTSTATUS".)
# CXXFLAGS += -DNTSTATUS=DWORD

LIBOVRPATH = .
INCPATH = -I. -IInclude -ISrc -I3rdparty
SRCPATH = Shim
OBJPATH = Shim
CXXBUILD = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $(OBJPATH)/

TARGET = libovr.dll

# These targets never correspond to files of the same name.
.PHONY: all install clean

all: $(TARGET)

OBJECTS = \
	$(OBJPATH)/OVR_CAPIShim.o \
	$(OBJPATH)/OVR_CAPI_Util.o \
	$(OBJPATH)/OVR_StereoProjection.o

$(TARGET): $(OBJECTS)
	$(LINK) -o $(TARGET) $(OBJECTS) $(LINKFLAGS)

$(OBJPATH)/OVR_CAPIShim.o: $(SRCPATH)/OVR_CAPIShim.c
	$(CXXBUILD)OVR_CAPIShim.o $(SRCPATH)/OVR_CAPIShim.c

$(OBJPATH)/OVR_CAPI_Util.o: $(SRCPATH)/OVR_CAPI_Util.cpp
	$(CXXBUILD)OVR_CAPI_Util.o $(SRCPATH)/OVR_CAPI_Util.cpp

$(OBJPATH)/OVR_StereoProjection.o: $(SRCPATH)/OVR_StereoProjection.cpp
	$(CXXBUILD)OVR_StereoProjection.o $(SRCPATH)/OVR_StereoProjection.cpp

install: $(TARGET)
	cp $(TARGET) /usr/i686-w64-mingw32/lib/
	cp -a Include/* /usr/i686-w64-mingw32/include/

clean:
	rm -f $(OBJPATH)/*.o
	rm -f $(TARGET)
--- /dev/null
+/************************************************************************************\r
+\r
+Filename : OVR_CAPIShim.c\r
+Content : CAPI DLL user library\r
+Created : November 20, 2014\r
+Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+\r
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");\r
+you may not use the Oculus VR Rift SDK except in compliance with the License,\r
+which is provided at the time of installation or download, or which\r
+otherwise accompanies this software in either electronic or hard copy form.\r
+\r
+You may obtain a copy of the License at\r
+\r
+http://www.oculusvr.com/licenses/LICENSE-3.3\r
+\r
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK\r
+distributed under the License is distributed on an "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+See the License for the specific language governing permissions and\r
+limitations under the License.\r
+\r
+************************************************************************************/\r
+\r
+#include "OVR_CAPI.h"\r
+#include "OVR_Version.h"\r
+#include "OVR_ErrorCode.h"\r
+#include "OVR_CAPI_Prototypes.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <stddef.h>\r
+#include <stdint.h>\r
+#include <string.h>\r
+#include <ctype.h>\r
+#include <assert.h>\r
+\r
+#if defined(_WIN32)\r
+#if defined(_MSC_VER)\r
+#pragma warning(push, 0)\r
+#endif\r
+#include <windows.h>\r
+#if defined(_MSC_VER)\r
+#pragma warning(pop)\r
+#endif\r
+\r
+#include "../Include/OVR_CAPI_D3D.h"\r
+#else\r
+#if defined(__APPLE__)\r
+#include <mach-o/dyld.h>\r
+#include <sys/syslimits.h>\r
+#include <libgen.h>\r
+#include <pwd.h>\r
+#include <unistd.h>\r
+#endif\r
+#include <dlfcn.h>\r
+#include <sys/stat.h>\r
+#include <unistd.h>\r
+#endif\r
+#include "../Include/OVR_CAPI_GL.h"\r
+#include "../Include/OVR_CAPI_Vk.h"\r
+\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning(push)\r
+#pragma warning(disable : 4996) // 'getenv': This function or variable may be unsafe.\r
+#endif\r
+\r
// clang-format off
// Magic byte sequence identifying an Oculus SDK build; the final three bytes
// are the SDK version (major, minor, patch) this shim was compiled with.
static const uint8_t OculusSDKUniqueIdentifier[] = {
 0x9E, 0xB2, 0x0B, 0x1A, 0xB7, 0x97, 0x09, 0x20, 0xE0, 0xFB, 0x83, 0xED, 0xF8, 0x33, 0x5A, 0xEB,
 0x80, 0x4D, 0x8E, 0x92, 0x20, 0x69, 0x13, 0x56, 0xB4, 0xBB, 0xC4, 0x85, 0xA7, 0x9E, 0xA4, 0xFE,
 OVR_MAJOR_VERSION, OVR_MINOR_VERSION, OVR_PATCH_VERSION
};

// clang-format on

// NOTE(review): presumably the expected XOR of the identifier bytes, used as an
// integrity check; the consuming code is outside this chunk -- confirm.
static const uint8_t OculusSDKUniqueIdentifierXORResult = 0xcb;
+\r
// -----------------------------------------------------------------------------------
// ***** OVR_ENABLE_DEVELOPER_SEARCH
//
// If defined then our shared library loading code searches for developer build
// directories.
//
#if !defined(OVR_ENABLE_DEVELOPER_SEARCH)
// Intentionally left undefined here; define it via the build configuration.
#endif

// -----------------------------------------------------------------------------------
// ***** OVR_BUILD_DEBUG
//
// Defines OVR_BUILD_DEBUG when the compiler default debug preprocessor is set.
// (MSVC defines _DEBUG; other toolchains conventionally define DEBUG.)
//
// If you want to control the behavior of these flags, then explicitly define
// either -DOVR_BUILD_RELEASE or -DOVR_BUILD_DEBUG in the compiler arguments.

#if !defined(OVR_BUILD_DEBUG) && !defined(OVR_BUILD_RELEASE)
#if defined(_MSC_VER)
#if defined(_DEBUG)
#define OVR_BUILD_DEBUG
#endif
#else
#if defined(DEBUG)
#define OVR_BUILD_DEBUG
#endif
#endif
#endif
+\r
//-----------------------------------------------------------------------------------
// ***** FilePathCharType, ModuleHandleType, ModuleFunctionType
//
#if defined(_WIN32) // We need to use wchar_t on Microsoft platforms, as that's the native file
// system character type.
#define FilePathCharType \
  wchar_t // #define instead of typedef because debuggers (VC++, XCode) don't recognize typedef'd
// types as a string type.
typedef HMODULE ModuleHandleType;
typedef FARPROC ModuleFunctionType;
#else
#define FilePathCharType char
typedef void* ModuleHandleType;
typedef void* ModuleFunctionType;
#endif

#define ModuleHandleTypeNull ((ModuleHandleType)NULL)
#define ModuleFunctionTypeNull ((ModuleFunctionType)NULL)

//-----------------------------------------------------------------------------------
// ***** OVR_MAX_PATH
//
// Maximum file-path length in FilePathCharType units for the current platform.
#if !defined(OVR_MAX_PATH)
#if defined(_WIN32)
#define OVR_MAX_PATH _MAX_PATH
#elif defined(__APPLE__)
#define OVR_MAX_PATH PATH_MAX
#else
#define OVR_MAX_PATH 1024
#endif
#endif

// Resolves a symbol from a loaded module: GetProcAddress on Windows, dlsym elsewhere.
#if !defined(OVR_DLSYM)
#if defined(_WIN32)
#define OVR_DLSYM(dlImage, name) GetProcAddress(dlImage, name)
#else
#define OVR_DLSYM(dlImage, name) dlsym(dlImage, name)
#endif
#endif
+\r
// Bounded string copy with BSD strlcpy semantics: copies at most destsize-1
// characters of src into dest and always NUL-terminates when destsize > 0.
// Returns strlen(src), so truncation occurred iff the return value >= destsize.
static size_t OVR_strlcpy(char* dest, const char* src, size_t destsize) {
  const size_t srclen = strlen(src);

  if (destsize != 0) {
    const size_t copylen = (srclen < destsize) ? srclen : (destsize - 1);
    memcpy(dest, src, copylen);
    dest[copylen] = '\0';
  }

  return srclen;
}
+\r
/// Bounded string concatenation with BSD strlcat semantics: appends src to
/// dest without writing more than destsize bytes total, NUL-terminating the
/// result when there is room. Returns the length the concatenated string
/// would have had (dlen + strlen(src)), so a return value >= destsize
/// indicates truncation.
///
/// Fix over the previous version: the destination is scanned with an upper
/// bound of destsize. The old code called strlen(dest) unconditionally and
/// computed (destsize - d) - 1, so a dest that was not NUL-terminated within
/// destsize caused an out-of-bounds read and a size_t underflow feeding
/// memcpy (a massive buffer overflow). Now an unterminated dest is left
/// untouched and destsize + strlen(src) is returned, matching OpenBSD strlcat.
static size_t OVR_strlcat(char* dest, const char* src, size_t destsize) {
  const size_t s = strlen(src);
  size_t d = 0;

  // Bounded scan for the end of dest: never read past destsize bytes.
  while ((d < destsize) && dest[d])
    ++d;

  if (d == destsize) // dest has no terminator within destsize; nothing can be appended.
    return destsize + s;

  if (s + d < destsize) {
    // Everything fits, including src's terminator.
    memcpy(dest + d, src, (s + 1) * sizeof(*src));
  } else {
    // Truncate: fill the remaining space and terminate (d < destsize here,
    // so destsize - d - 1 cannot underflow).
    memcpy(dest + d, src, (destsize - d) - 1);
    dest[destsize - 1] = 0;
  }

  return s + d;
}
+\r
+#if defined(__APPLE__)\r
+static ovrBool\r
+OVR_strend(const char* pStr, const char* pFind, size_t strLength, size_t findLength) {\r
+ if (strLength == (size_t)-1)\r
+ strLength = strlen(pStr);\r
+ if (findLength == (size_t)-1)\r
+ findLength = strlen(pFind);\r
+ if (strLength >= findLength)\r
+ return (strcmp(pStr + strLength - findLength, pFind) == 0);\r
+ return ovrFalse;\r
+}\r
+\r
+static ovrBool OVR_isBundleFolder(const char* filePath) {\r
+ static const char* extensionArray[] = {".app", ".bundle", ".framework", ".plugin", ".kext"};\r
+ size_t i;\r
+\r
+ for (i = 0; i < sizeof(extensionArray) / sizeof(extensionArray[0]); i++) {\r
+ if (OVR_strend(filePath, extensionArray[i], (size_t)-1, (size_t)-1))\r
+ return ovrTrue;\r
+ }\r
+\r
+ return ovrFalse;\r
+}\r
+#endif\r
+\r
+#if defined(OVR_ENABLE_DEVELOPER_SEARCH)\r
+\r
+// Returns true if the path begins with the given prefix.\r
+// Doesn't support non-ASCII paths, else the return value may be incorrect.\r
+static int OVR_PathStartsWith(const FilePathCharType* path, const char* prefix) {\r
+ while (*prefix) {\r
+ if (tolower((unsigned char)*path++) != tolower((unsigned char)*prefix++))\r
+ return ovrFalse;\r
+ }\r
+\r
+ return ovrTrue;\r
+}\r
+\r
+#endif\r
+\r
// Writes the current working directory into directoryPath (capacity measured
// in characters, including the terminating null) and guarantees a trailing
// path separator on success. Returns ovrTrue on success; on failure returns
// ovrFalse and sets directoryPath to an empty string (capacity permitting).
static ovrBool OVR_GetCurrentWorkingDirectory(
    FilePathCharType* directoryPath,
    size_t directoryPathCapacity) {
#if defined(_WIN32)
  DWORD dwSize = GetCurrentDirectoryW((DWORD)directoryPathCapacity, directoryPath);

  if ((dwSize > 0) &&
      (directoryPathCapacity > 1)) // Test > 1 so we have room to possibly append a \ char.
  {
    size_t length = wcslen(directoryPath);

    // Append a trailing backslash unless one (or a forward slash) is already there.
    // NOTE(review): the append is not re-checked against the capacity here;
    // presumably GetCurrentDirectoryW returning < capacity leaves room -- confirm.
    if ((length == 0) ||
        ((directoryPath[length - 1] != L'\\') && (directoryPath[length - 1] != L'/'))) {
      directoryPath[length++] = L'\\';
      directoryPath[length] = L'\0';
    }

    return ovrTrue;
  }

#else
  char* cwd = getcwd(directoryPath, directoryPathCapacity);

  if (cwd && directoryPath[0] &&
      (directoryPathCapacity > 1)) // Test > 1 so we have room to possibly append a / char.
  {
    size_t length = strlen(directoryPath);

    // Append a trailing slash unless one is already present.
    if ((length == 0) || (directoryPath[length - 1] != '/')) {
      directoryPath[length++] = '/';
      directoryPath[length] = '\0';
    }

    return ovrTrue;
  }
#endif

  // Failure path: report an empty string when there is any capacity for it.
  if (directoryPathCapacity > 0)
    directoryPath[0] = '\0';

  return ovrFalse;
}
+\r
+// The appContainer argument is specific currently to only Macintosh. If true and the application is\r
+// a .app bundle then it returns the\r
+// location of the bundle and not the path to the executable within the bundle. Else return the path\r
+// to the executable binary itself.\r
+// The moduleHandle refers to the relevant dynamic (a.k.a. shared) library. The main executable is\r
+// the main module, and each of the shared\r
+// libraries is a module. This way you can specify that you want to know the directory of the given\r
+// shared library, which may be different\r
+// from the main executable. If the moduleHandle is NULL then the current application module is\r
+// used.\r
// Writes the directory of the given module (or of the running executable when
// moduleHandle is NULL / unused on the platform) into directoryPath, with a
// trailing path separator. See the comment block above for the appContainer
// (macOS bundle) and moduleHandle semantics. Returns ovrTrue on success;
// ovrFalse with an empty directoryPath on failure.
static ovrBool OVR_GetCurrentApplicationDirectory(
    FilePathCharType* directoryPath,
    size_t directoryPathCapacity,
    ovrBool appContainer,
    ModuleHandleType moduleHandle) {
#if defined(_WIN32)
  DWORD length = GetModuleFileNameW(moduleHandle, directoryPath, (DWORD)directoryPathCapacity);
  DWORD pos;

  if ((length != 0) &&
      (length <
       (DWORD)directoryPathCapacity)) // If there wasn't an error and there was enough capacity...
  {
    // Walk backward from the end, zeroing characters until a path separator is
    // reached; this strips the module file name and leaves the directory with
    // its trailing separator intact.
    for (pos = length; (pos > 0) && (directoryPath[pos] != '\\') && (directoryPath[pos] != '/');
         --pos) {
      if ((directoryPath[pos - 1] != '\\') && (directoryPath[pos - 1] != '/'))
        directoryPath[pos - 1] = 0;
    }

    return ovrTrue;
  }

  (void)appContainer; // Not used on this platform.

#elif defined(__APPLE__)
  uint32_t directoryPathCapacity32 = (uint32_t)directoryPathCapacity;
  int result = _NSGetExecutablePath(directoryPath, &directoryPathCapacity32);

  if (result == 0) // If success...
  {
    char realPath[OVR_MAX_PATH];

    if (realpath(directoryPath, realPath)) // realpath returns the canonicalized absolute file path.
    {
      size_t length = 0;

      if (appContainer) // If the caller wants the path to the containing bundle...
      {
        char containerPath[OVR_MAX_PATH];
        ovrBool pathIsContainer;

        OVR_strlcpy(containerPath, realPath, sizeof(containerPath));
        pathIsContainer = OVR_isBundleFolder(containerPath);

        // Walk up the directory tree via dirname() until a bundle folder is
        // found or the path degenerates to "." or "/".
        while (!pathIsContainer && strncmp(containerPath, ".", OVR_MAX_PATH) &&
               strncmp(containerPath, "/", OVR_MAX_PATH)) // While the container we're looking for
        // is not found and while the path doesn't
        // start with a . or /
        {
          OVR_strlcpy(containerPath, dirname(containerPath), sizeof(containerPath));
          pathIsContainer = OVR_isBundleFolder(containerPath);
        }

        if (pathIsContainer)
          length = OVR_strlcpy(directoryPath, containerPath, directoryPathCapacity);
      }

      if (length == 0) // If not set above in the appContainer block...
        length = OVR_strlcpy(directoryPath, realPath, directoryPathCapacity);

      while (length-- && (directoryPath[length] != '/'))
        directoryPath[length] =
            '\0'; // Strip the file name from the file path, leaving a trailing / char.

      return ovrTrue;
    }
  }

  (void)moduleHandle; // Not used on this platform.

#else
  // Linux and other Unix: resolve the symlink to the currently running executable.
  // NOTE(review): readlink does not NUL-terminate; the stripping loop below
  // starts by reading directoryPath[length], one byte past the written data --
  // presumably benign because a '/' is found before it matters, but confirm.
  ssize_t length = readlink("/proc/self/exe", directoryPath, directoryPathCapacity);
  ssize_t pos;

  if (length > 0) {
    // Strip the executable name, leaving the directory with a trailing /.
    for (pos = length; (pos > 0) && (directoryPath[pos] != '/'); --pos) {
      if (directoryPath[pos - 1] != '/')
        directoryPath[pos - 1] = '\0';
    }

    return ovrTrue;
  }

  (void)appContainer; // Not used on this platform.
  (void)moduleHandle;
#endif

  // Failure path: report an empty string when there is any capacity for it.
  if (directoryPathCapacity > 0)
    directoryPath[0] = '\0';

  return ovrFalse;
}
+\r
+#if defined(_WIN32) || defined(OVR_ENABLE_DEVELOPER_SEARCH) // Used only in these cases\r
+\r
+// Get the file path to the current module's (DLL or EXE) directory within the current process.\r
+// Will be different from the process module handle if the current module is a DLL and is in a\r
+// different directory than the EXE module.\r
+// If successful then directoryPath will be valid and ovrTrue is returned, else directoryPath will\r
+// be empty and ovrFalse is returned.\r
// Writes the directory containing the module this code was linked into (DLL
// or EXE) into directoryPath. Returns ovrTrue on success, ovrFalse otherwise.
static ovrBool OVR_GetCurrentModuleDirectory(
    FilePathCharType* directoryPath,
    size_t directoryPathCapacity,
    ovrBool appContainer) {
#if defined(_WIN32)
  HMODULE hModule;
  // Resolve the module that contains this function's own code, without
  // incrementing that module's reference count.
  BOOL result = GetModuleHandleExW(
      GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
      (LPCWSTR)(uintptr_t)OVR_GetCurrentModuleDirectory,
      &hModule);
  if (result)
    OVR_GetCurrentApplicationDirectory(directoryPath, directoryPathCapacity, ovrTrue, hModule);
  else
    directoryPath[0] = 0; // Report an empty path on failure.

  (void)appContainer; // Not used on this platform.

  return directoryPath[0] ? ovrTrue : ovrFalse;
#else
  // Non-Windows platforms: fall back to the application directory.
  return OVR_GetCurrentApplicationDirectory(
      directoryPath, directoryPathCapacity, appContainer, NULL);
#endif
}
+\r
+#endif\r
+\r
+#if defined(_WIN32)\r
+\r
+#ifdef _MSC_VER\r
+#pragma warning(push)\r
+#pragma warning(disable : 4201)\r
+#endif\r
+\r
+#include <softpub.h>\r
+#include <wincrypt.h>\r
+\r
+#ifdef _MSC_VER\r
+#pragma warning(pop)\r
+#endif\r
+\r
// Expected certificates:
#define ExpectedNumCertificates 3

// Issuer/Subject common-name pair for one certificate in a signing chain.
typedef struct CertificateEntry_t {
  const wchar_t* Issuer;
  const wchar_t* Subject;
} CertificateEntry;

// Leaf-to-root chain expected on Oculus-signed binaries.
CertificateEntry NewCertificateChain[ExpectedNumCertificates] = {
    {L"DigiCert SHA2 Assured ID Code Signing CA", L"Oculus VR, LLC"},
    {L"DigiCert Assured ID Root CA", L"DigiCert SHA2 Assured ID Code Signing CA"},
    {L"DigiCert Assured ID Root CA", L"DigiCert Assured ID Root CA"},
};

#define CertificateChainCount 1
// All certificate chains accepted by OVR_Win32_SignCheck.
CertificateEntry* AllowedCertificateChains[CertificateChainCount] = {NewCertificateChain};

// Function-pointer types for the wintrust/crypt32 entry points that are
// resolved dynamically at runtime (see OVR_Win32_SignCheck).
typedef WINCRYPT32API DWORD(WINAPI* PtrCertGetNameStringW)(
    PCCERT_CONTEXT pCertContext,
    DWORD dwType,
    DWORD dwFlags,
    void* pvTypePara,
    LPWSTR pszNameString,
    DWORD cchNameString);
typedef LONG(WINAPI* PtrWinVerifyTrust)(HWND hwnd, GUID* pgActionID, LPVOID pWVTData);
typedef CRYPT_PROVIDER_DATA*(WINAPI* PtrWTHelperProvDataFromStateData)(HANDLE hStateData);
typedef CRYPT_PROVIDER_SGNR*(WINAPI* PtrWTHelperGetProvSignerFromChain)(
    CRYPT_PROVIDER_DATA* pProvData,
    DWORD idxSigner,
    BOOL fCounterSigner,
    DWORD idxCounterSigner);

// Resolved by OVR_Win32_SignCheck; remain null until it runs.
PtrCertGetNameStringW m_PtrCertGetNameStringW = 0;
PtrWinVerifyTrust m_PtrWinVerifyTrust = 0;
PtrWTHelperProvDataFromStateData m_PtrWTHelperProvDataFromStateData = 0;
PtrWTHelperGetProvSignerFromChain m_PtrWTHelperGetProvSignerFromChain = 0;

// Outcome codes for ValidateCertificateContents.
typedef enum ValidateCertificateContentsResult_ {
  VCCRSuccess = 0,
  VCCRErrorCertCount = -1, // Chain missing or wrong number of certificates.
  VCCRErrorTrust = -2, // Untrusted self-signed certificate or test certificate.
  VCCRErrorValidation = -3 // Subject/issuer name mismatch.
} ValidateCertificateContentsResult;
+\r
// Validates the signer's certificate chain against one expected chain:
// exact certificate count, no untrusted self-signed or test certificates,
// and subject/issuer common names equal to the entries in 'chain'.
// Returns VCCRSuccess, or the VCCRError* code for the first failed check.
static ValidateCertificateContentsResult ValidateCertificateContents(
    CertificateEntry* chain,
    CRYPT_PROVIDER_SGNR* cps) {
  int certIndex;

  if (!cps || !cps->pasCertChain || cps->csCertChain != ExpectedNumCertificates) {
    return VCCRErrorCertCount;
  }

  for (certIndex = 0; certIndex < ExpectedNumCertificates; ++certIndex) {
    CRYPT_PROVIDER_CERT* pCertData = &cps->pasCertChain[certIndex];
    wchar_t subjectStr[400] = {0};
    wchar_t issuerStr[400] = {0};

    // Reject self-signed certificates that are not trusted roots, and test certs.
    if ((pCertData->fSelfSigned && !pCertData->fTrustedRoot) || pCertData->fTestCert) {
      return VCCRErrorTrust;
    }

    // Subject common name (szOID_COMMON_NAME attribute).
    m_PtrCertGetNameStringW(
        pCertData->pCert,
        CERT_NAME_ATTR_TYPE,
        0,
        szOID_COMMON_NAME,
        subjectStr,
        ARRAYSIZE(subjectStr));

    // Issuer name (CERT_NAME_ISSUER_FLAG). NOTE(review): pvTypePara is NULL
    // here -- presumably defaults to the common-name OID; confirm against the
    // CertGetNameStringW documentation.
    m_PtrCertGetNameStringW(
        pCertData->pCert,
        CERT_NAME_ATTR_TYPE,
        CERT_NAME_ISSUER_FLAG,
        0,
        issuerStr,
        ARRAYSIZE(issuerStr));

    if (wcscmp(subjectStr, chain[certIndex].Subject) != 0 ||
        wcscmp(issuerStr, chain[certIndex].Issuer) != 0) {
      return VCCRErrorValidation;
    }
  }

  return VCCRSuccess;
}
+\r
// Converts the FARPROC returned by GetProcAddress into a typed function
// pointer via a union, avoiding a direct cast between incompatible
// function-pointer types.
#define OVR_SIGNING_CONVERT_PTR(ftype, fptr, procaddr) \
  {                                                    \
    union {                                            \
      ftype p1;                                        \
      ModuleFunctionType p2;                           \
    } u;                                               \
    u.p2 = procaddr;                                   \
    fptr = u.p1;                                       \
  }
+\r
// Verifies the Authenticode signature of the file at fullPath (already open
// as hFile) via WinVerifyTrust, then checks the signer's certificate chain
// against AllowedCertificateChains. Returns TRUE only if both checks pass.
// wintrust.dll and crypt32.dll are loaded and resolved dynamically so this
// translation unit has no link-time dependency on them.
static BOOL OVR_Win32_SignCheck(FilePathCharType* fullPath, HANDLE hFile) {
  WINTRUST_FILE_INFO fileData;
  WINTRUST_DATA wintrustData;
  GUID actionGUID = WINTRUST_ACTION_GENERIC_VERIFY_V2;
  LONG resultStatus;
  BOOL verified = FALSE;
  // NOTE(review): these module handles are never freed, including on the
  // early-return failure paths below -- presumably intentional so the
  // resolved m_Ptr* function pointers remain valid for later calls; confirm.
  HMODULE libWinTrust = LoadLibraryW(L"wintrust");
  HMODULE libCrypt32 = LoadLibraryW(L"crypt32");
  if (libWinTrust == NULL || libCrypt32 == NULL) {
    return FALSE;
  }

  // Resolve the needed entry points into the file-scope function pointers.
  OVR_SIGNING_CONVERT_PTR(
      PtrCertGetNameStringW,
      m_PtrCertGetNameStringW,
      GetProcAddress(libCrypt32, "CertGetNameStringW"));
  OVR_SIGNING_CONVERT_PTR(
      PtrWinVerifyTrust, m_PtrWinVerifyTrust, GetProcAddress(libWinTrust, "WinVerifyTrust"));
  OVR_SIGNING_CONVERT_PTR(
      PtrWTHelperProvDataFromStateData,
      m_PtrWTHelperProvDataFromStateData,
      GetProcAddress(libWinTrust, "WTHelperProvDataFromStateData"));
  OVR_SIGNING_CONVERT_PTR(
      PtrWTHelperGetProvSignerFromChain,
      m_PtrWTHelperGetProvSignerFromChain,
      GetProcAddress(libWinTrust, "WTHelperGetProvSignerFromChain"));

  if (m_PtrCertGetNameStringW == NULL || m_PtrWinVerifyTrust == NULL ||
      m_PtrWTHelperProvDataFromStateData == NULL || m_PtrWTHelperGetProvSignerFromChain == NULL) {
    return FALSE;
  }

  if (hFile == INVALID_HANDLE_VALUE || fullPath == NULL) {
    return FALSE;
  }

  // Describe the file to be verified (both path and pinned handle).
  ZeroMemory(&fileData, sizeof(fileData));
  fileData.cbStruct = sizeof(fileData);
  fileData.pcwszFilePath = fullPath;
  fileData.hFile = hFile;

  ZeroMemory(&wintrustData, sizeof(wintrustData));
  wintrustData.cbStruct = sizeof(wintrustData);
  wintrustData.pFile = &fileData;
  wintrustData.dwUnionChoice = WTD_CHOICE_FILE; // Specify WINTRUST_FILE_INFO.
  wintrustData.dwUIChoice = WTD_UI_NONE; // Do not display any UI.
  wintrustData.dwUIContext = WTD_UICONTEXT_EXECUTE; // Hint that this is about app execution.
  wintrustData.fdwRevocationChecks = WTD_REVOKE_NONE;
  wintrustData.dwProvFlags = WTD_REVOCATION_CHECK_NONE;
  wintrustData.dwStateAction = WTD_STATEACTION_VERIFY; // Keep state for the chain inspection below.
  wintrustData.hWVTStateData = 0;

  resultStatus = m_PtrWinVerifyTrust(
      (HWND)INVALID_HANDLE_VALUE, // Do not display any UI.
      &actionGUID, // V2 verification
      &wintrustData);

  // If the signature verified, inspect the single signer's certificate chain
  // and accept the file only if it matches one of the allowed chains exactly.
  if (resultStatus == ERROR_SUCCESS && wintrustData.hWVTStateData != 0 &&
      wintrustData.hWVTStateData != INVALID_HANDLE_VALUE) {
    CRYPT_PROVIDER_DATA* cpd = m_PtrWTHelperProvDataFromStateData(wintrustData.hWVTStateData);
    if (cpd && cpd->csSigners == 1) {
      CRYPT_PROVIDER_SGNR* cps = m_PtrWTHelperGetProvSignerFromChain(cpd, 0, FALSE, 0);
      int chainIndex;
      for (chainIndex = 0; chainIndex < CertificateChainCount; ++chainIndex) {
        CertificateEntry* chain = AllowedCertificateChains[chainIndex];
        if (VCCRSuccess == ValidateCertificateContents(chain, cps)) {
          verified = TRUE;
          break;
        }
      }
    }
  }

  // Release the verification state allocated by WTD_STATEACTION_VERIFY.
  wintrustData.dwStateAction = WTD_STATEACTION_CLOSE;

  m_PtrWinVerifyTrust(
      (HWND)INVALID_HANDLE_VALUE, // Do not display any UI.
      &actionGUID, // V2 verification
      &wintrustData);

  return verified;
}
+\r
+#endif // #if defined(_WIN32)\r
+\r
// Opens the shared library at libraryPath. On Windows the file's digital
// signature is verified before loading, with the file handle held open so
// the verified bytes cannot be swapped out before LoadLibraryW runs.
// Sets *result to ovrSuccess or an ovrError_Lib* code describing the failure;
// returns the module handle, or NULL on failure.
static ModuleHandleType OVR_OpenLibrary(const FilePathCharType* libraryPath, ovrResult* result) {
#if defined(_WIN32)
  DWORD fullPathNameLen = 0;
  FilePathCharType fullPath[MAX_PATH] = {0};
  HANDLE hFilePinned = INVALID_HANDLE_VALUE;
  ModuleHandleType hModule = 0;

  *result = ovrSuccess;

  fullPathNameLen = GetFullPathNameW(libraryPath, MAX_PATH, fullPath, 0);
  if (fullPathNameLen <= 0 || fullPathNameLen >= MAX_PATH) {
    *result = ovrError_LibPath;
    return NULL;
  }

  // Pin the file open for the duration of the signature check and load.
  hFilePinned = CreateFileW(
      fullPath, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_READONLY, 0);

  if (hFilePinned == INVALID_HANDLE_VALUE) {
    *result = ovrError_LibPath;
    return NULL;
  }

  if (!OVR_Win32_SignCheck(fullPath, hFilePinned)) {
    *result = ovrError_LibSignCheck;
    CloseHandle(hFilePinned);
    return NULL;
  }

  hModule = LoadLibraryW(fullPath);

  CloseHandle(hFilePinned);

  if (hModule == NULL) {
    *result = ovrError_LibLoad;
  }

  return hModule;
#else
  *result = ovrSuccess;

  // Don't bother trying to dlopen() a file that is not even there.
  if (access(libraryPath, X_OK | R_OK) != 0) {
    *result = ovrError_LibPath;
    return NULL;
  }

  dlerror(); // Clear any previous dlopen() errors

  // Use RTLD_NOW because we don't want unexpected stalls at runtime, and the library isn't very
  // large.
  // Use RTLD_LOCAL to avoid unilaterally exporting resolved symbols to the rest of this process.
  void* lib = dlopen(libraryPath, RTLD_NOW | RTLD_LOCAL);

  if (!lib) {
    fprintf(stderr, "ERROR: Can't load '%s':\n%s\n", libraryPath, dlerror());
  }

  return lib;
#endif
}
+\r
+static void OVR_CloseLibrary(ModuleHandleType hLibrary) {\r
+ if (hLibrary) {\r
+#if defined(_WIN32)\r
+ // We may need to consider what to do in the case that the library is in an exception state.\r
+ // In a Windows C++ DLL, all global objects (including static members of classes) will be\r
+ // constructed just\r
+ // before the calling of the DllMain with DLL_PROCESS_ATTACH and they will be destroyed just\r
+ // after\r
+ // the call of the DllMain with DLL_PROCESS_DETACH. We may need to intercept DLL_PROCESS_DETACH\r
+ // and\r
+ // have special handling for the case that the DLL is broken.\r
+ FreeLibrary(hLibrary);\r
+#else\r
+ dlclose(hLibrary);\r
+#endif\r
+ }\r
+}\r
+\r
+// Returns a valid ModuleHandleType (e.g. Windows HMODULE) or returns ModuleHandleTypeNull (e.g.\r
+// NULL).\r
+// The caller is required to eventually call OVR_CloseLibrary on a valid return handle.\r
+//\r
// Searches the platform-appropriate set of directories for a LibOVRRT shared
// library matching the requested major version, writes the winning path into
// libraryPath, and returns the opened module handle (or ModuleHandleTypeNull
// with *result set to the last OVR_OpenLibrary error).
static ModuleHandleType OVR_FindLibraryPath(
    int requestedProductVersion,
    int requestedMajorVersion,
    FilePathCharType* libraryPath,
    size_t libraryPathCapacity,
    ovrResult* result) {
  ModuleHandleType moduleHandle;
  int printfResult;
  FilePathCharType developerDir[OVR_MAX_PATH] = {'\0'};

#if defined(_MSC_VER)
#if defined(_WIN64)
  const char* pBitDepth = "64";
#else
  const char* pBitDepth = "32";
#endif
#elif defined(__APPLE__)
// For Apple platforms we are using a Universal Binary LibOVRRT dylib which has both 32 and 64 in
// it.
#else // Other Unix.
#if defined(__x86_64__)
  const char* pBitDepth = "64";
#else
  const char* pBitDepth = "32";
#endif
#endif

  (void)requestedProductVersion;

  *result = ovrError_LibLoad;
  moduleHandle = ModuleHandleTypeNull;
  if (libraryPathCapacity)
    libraryPath[0] = '\0';

// Note: OVR_ENABLE_DEVELOPER_SEARCH is deprecated in favor of the simpler LIBOVR_DLL_DIR, as the
// edge
// case uses of the former created some complications that may be best solved by simply using a
// LIBOVR_DLL_DIR
// environment variable which the user can set in their debugger or system environment variables.
#if (defined(_MSC_VER) || defined(_WIN32)) && !defined(OVR_FILE_PATH_SEPARATOR)
#define OVR_FILE_PATH_SEPARATOR "\\"
#else
#define OVR_FILE_PATH_SEPARATOR "/"
#endif

  {
    const char* pLibOvrDllDir =
        getenv("LIBOVR_DLL_DIR"); // Example value: /dev/OculusSDK/Main/LibOVR/Mac/Debug/

    if (pLibOvrDllDir) {
      char developerDir8[OVR_MAX_PATH];
      size_t length = OVR_strlcpy(
          developerDir8,
          pLibOvrDllDir,
          sizeof(developerDir8)); // If missing a trailing path separator then append one.

      // NOTE(review): if the environment value already ends with a path
      // separator, this branch is skipped and developerDir is never populated
      // from developerDir8 -- looks like a bug; confirm intended behavior.
      if ((length > 0) && (length < sizeof(developerDir8)) &&
          (developerDir8[length - 1] != OVR_FILE_PATH_SEPARATOR[0])) {
        length = OVR_strlcat(developerDir8, OVR_FILE_PATH_SEPARATOR, sizeof(developerDir8));

        if (length < sizeof(developerDir8)) {
#if defined(_WIN32)
          size_t i;
          for (i = 0; i <= length; ++i) // ASCII conversion of 8 to 16 bit text.
            developerDir[i] = (FilePathCharType)(uint8_t)developerDir8[i];
#else
          OVR_strlcpy(developerDir, developerDir8, sizeof(developerDir));
#endif
        }
      }
    }
  }

// Support checking for a developer library location override via the OVR_SDK_ROOT environment
// variable.
// This pathway is deprecated in favor of using LIBOVR_DLL_DIR instead.
#if defined(OVR_ENABLE_DEVELOPER_SEARCH)
  if (!developerDir[0]) // If not already set by LIBOVR_DLL_DIR...
  {
    // __FILE__ maps to <sdkRoot>/LibOVR/Src/OVR_CAPIShim.c
    char sdkRoot[OVR_MAX_PATH];
    char* pLibOVR;
    size_t i;

    // We assume that __FILE__ returns a full path, which isn't the case for some compilers.
    // Need to compile with /FC under VC++ for __FILE__ to expand to the full file path.
    // NOTE: This needs to be fixed on Mac. __FILE__ is not expanded to full path under clang.
    OVR_strlcpy(sdkRoot, __FILE__, sizeof(sdkRoot));
    for (i = 0; sdkRoot[i]; ++i)
      sdkRoot[i] = (char)tolower(sdkRoot[i]); // Microsoft doesn't maintain case.
    pLibOVR = strstr(sdkRoot, "libovr");
    if (pLibOVR && (pLibOVR > sdkRoot))
      pLibOVR[-1] = '\0';
    else
      sdkRoot[0] = '\0';

    if (sdkRoot[0]) {
      // We want to use a developer version of the library only if the application is also being
      // executed from
      // a developer location. Ideally we would do this by checking that the relative path from the
      // executable to
      // the shared library is the same at runtime as it was when the executable was first built,
      // but we don't have
      // an easy way to do that from here and it would require some runtime help from the
      // application code.
      // Instead we verify that the application is simply in the same developer tree that was was
      // when built.
      // We could put in some additional logic to make it very likely to know if the EXE is in its
      // original location.
      FilePathCharType modulePath[OVR_MAX_PATH];
      const ovrBool pathMatch = OVR_GetCurrentModuleDirectory(modulePath, OVR_MAX_PATH, ovrTrue) &&
          (OVR_PathStartsWith(modulePath, sdkRoot) == ovrTrue);
      if (pathMatch == ovrFalse) {
        sdkRoot[0] = '\0'; // The application module is not in the developer tree, so don't try to
        // use the developer shared library.
      }
    }

    if (sdkRoot[0]) {
#ifndef CONFIG_VARIANT
#define CONFIG_VARIANT
#endif

#if defined(OVR_BUILD_DEBUG)
      const char* pConfigDirName = "Debug" CONFIG_VARIANT;
#else
      const char* pConfigDirName = "Release" CONFIG_VARIANT;
#endif

#undef CONFIG_VARIANT

#if defined(_MSC_VER)
#if defined(_WIN64)
      const char* pArchDirName = "x64";
#else
      const char* pArchDirName = "Win32";
#endif
#else
#if defined(__x86_64__)
      const char* pArchDirName = "x86_64";
#else
      const char* pArchDirName = "i386";
#endif
#endif

#if defined(_MSC_VER) && (_MSC_VER == 1600)
      const char* pCompilerVersion = "VS2010";
#elif defined(_MSC_VER) && (_MSC_VER == 1700)
      const char* pCompilerVersion = "VS2012";
#elif defined(_MSC_VER) && (_MSC_VER == 1800)
      const char* pCompilerVersion = "VS2013";
#elif defined(_MSC_VER) && (_MSC_VER >= 1900)
      const char* pCompilerVersion = "VS2015";
#endif

#if defined(_WIN32)
      int count = swprintf_s(
          developerDir,
          OVR_MAX_PATH,
          L"%hs\\LibOVR\\Lib\\Windows\\%hs\\%hs\\%hs\\",
          sdkRoot,
          pArchDirName,
          pConfigDirName,
          pCompilerVersion);
#elif defined(__APPLE__)
      // Apple/XCode doesn't let you specify an arch in build paths, which is OK if we build a
      // universal binary.
      (void)pArchDirName;
      int count =
          snprintf(developerDir, OVR_MAX_PATH, "%s/LibOVR/Lib/Mac/%s/", sdkRoot, pConfigDirName);
#else
      int count = snprintf(
          developerDir,
          OVR_MAX_PATH,
          "%s/LibOVR/Lib/Linux/%s/%s/",
          sdkRoot,
          pArchDirName,
          pConfigDirName);
#endif

      if ((count < 0) ||
          (count >=
           (int)OVR_MAX_PATH)) // If there was an error or capacity overflow... clear the string.
      {
        developerDir[0] = '\0';
      }
    }
  }
#endif // OVR_ENABLE_DEVELOPER_SEARCH

  {
#if !defined(_WIN32)
    FilePathCharType cwDir[OVR_MAX_PATH]; // Will be filled in below.
    FilePathCharType appDir[OVR_MAX_PATH];
#endif
    size_t i;

#if defined(_WIN32)
    // On Windows, only search the developer directory and the usual path
    const FilePathCharType* directoryArray[2];
    directoryArray[0] = developerDir; // Developer directory.
    directoryArray[1] = L""; // No directory, which causes Windows to use the standard search
    // strategy to find the DLL.

#elif defined(__APPLE__)
    // https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html

    FilePathCharType homeDir[OVR_MAX_PATH];
    FilePathCharType homeFrameworkDir[OVR_MAX_PATH];
    const FilePathCharType* directoryArray[5];
    size_t homeDirLength = 0;

    const char* pHome = getenv("HOME"); // Try getting the HOME environment variable.

    if (pHome) {
      homeDirLength = OVR_strlcpy(homeDir, pHome, sizeof(homeDir));
    } else {
      // https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man3/getpwuid_r.3.html
      const long pwBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX);

      if (pwBufferSize != -1) {
        char pwBuffer[pwBufferSize];
        struct passwd pw;
        struct passwd* pwResult = NULL;

        if ((getpwuid_r(getuid(), &pw, pwBuffer, pwBufferSize, &pwResult) == 0) && pwResult)
          homeDirLength = OVR_strlcpy(homeDir, pw.pw_dir, sizeof(homeDir));
      }
    }

    if (homeDirLength) {
      // Build ~/Library/Frameworks/ from the home directory (strip any trailing /).
      if (homeDir[homeDirLength - 1] == '/')
        homeDir[homeDirLength - 1] = '\0';
      OVR_strlcpy(homeFrameworkDir, homeDir, sizeof(homeFrameworkDir));
      OVR_strlcat(homeFrameworkDir, "/Library/Frameworks/", sizeof(homeFrameworkDir));
    } else {
      homeFrameworkDir[0] = '\0';
    }

    directoryArray[0] = cwDir;
    directoryArray[1] = appDir;
    directoryArray[2] = homeFrameworkDir; // ~/Library/Frameworks/
    directoryArray[3] = "/Library/Frameworks/"; // DYLD_FALLBACK_FRAMEWORK_PATH
    directoryArray[4] = developerDir; // Developer directory.

#else
#define STR1(x) #x
#define STR(x) STR1(x)
#ifdef LIBDIR
#define TEST_LIB_DIR STR(LIBDIR) "/"
#else
#define TEST_LIB_DIR appDir
#endif

    const FilePathCharType* directoryArray[5];
    directoryArray[0] = cwDir;
    directoryArray[1] = TEST_LIB_DIR; // Directory specified by LIBDIR if defined.
    directoryArray[2] = developerDir; // Developer directory.
    directoryArray[3] = "/usr/local/lib/";
    directoryArray[4] = "/usr/lib/";
#endif

#if !defined(_WIN32)
    OVR_GetCurrentWorkingDirectory(cwDir, sizeof(cwDir) / sizeof(cwDir[0]));
    OVR_GetCurrentApplicationDirectory(appDir, sizeof(appDir) / sizeof(appDir[0]), ovrTrue, NULL);
#endif

    // Versioned file expectations.
    // Windows: LibOVRRT<BIT_DEPTH>_<PRODUCT_VERSION>_<MAJOR_VERSION>.dll
    // // Example: LibOVRRT64_1_1.dll -- LibOVRRT 64 bit, product 1, major version 1,
    // minor/patch/build numbers unspecified in the name.
    // Mac:
    // LibOVRRT_<PRODUCT_VERSION>.framework/Versions/<MAJOR_VERSION>/LibOVRRT_<PRODUCT_VERSION>
    // // We are not presently using the .framework bundle's Current directory to hold the
    // version number. This may change.
    // Linux: libOVRRT<BIT_DEPTH>_<PRODUCT_VERSION>.so.<MAJOR_VERSION>
    // // The file on disk may contain a minor version number, but a symlink is used to map this
    // major-only version to it.

    // Since we are manually loading the LibOVR dynamic library, we need to look in various
    // locations for a file
    // that matches our requirements. The functionality required is somewhat similar to the
    // operating system's
    // dynamic loader functionality. Each OS has some differences in how this is handled.
    // Future versions of this may iterate over all libOVRRT.so.* files in the directory and use the
    // one that matches our requirements.
    //
    // We need to look for a library that matches the product version and major version of the
    // caller's request,
    // and that library needs to support a minor version that is >= the requested minor version.
    // Currently we
    // don't test the minor version here, as the library is named based only on the product and
    // major version.
    // Currently the minor version test is handled via the initialization of the library and the
    // initialization
    // fails if minor version cannot be supported by the library. The reason this is done during
    // initialization
    // is that the library can at runtime support multiple minor versions based on the user's
    // request. To the
    // external user, all that matters it that they call ovr_Initialize with a requested version and
    // it succeeds
    // or fails.
    //
    // The product version is something that is at a higher level than the major version, and is not
    // something that's
    // always seen in libraries (an example is the well-known LibXml2 library, in which the 2 is
    // essentially the product version).

    // Try each candidate directory in priority order; return on the first
    // library that opens (and, on Windows, passes the signature check).
    for (i = 0; i < sizeof(directoryArray) / sizeof(directoryArray[0]); ++i) {
#if defined(_WIN32)
      printfResult = swprintf(
          libraryPath,
          libraryPathCapacity,
          L"%lsLibOVRRT%hs_%d.dll",
          directoryArray[i],
          pBitDepth,
          requestedMajorVersion);

      // Empty directory entry: let Windows resolve the bare DLL name via its
      // standard search order, then copy the resolved full path back.
      // NOTE(review): the copy below moves MAX_PATH characters guarded only by
      // searchResult < libraryPathCapacity -- confirm libraryPathCapacity is
      // always >= MAX_PATH at the call sites.
      if (*directoryArray[i] == 0) {
        int k;
        FilePathCharType foundPath[MAX_PATH] = {0};
        DWORD searchResult = SearchPathW(NULL, libraryPath, NULL, MAX_PATH, foundPath, NULL);
        if (searchResult <= 0 || searchResult >= libraryPathCapacity) {
          continue;
        }
        foundPath[MAX_PATH - 1] = 0;
        for (k = 0; k < MAX_PATH; ++k) {
          libraryPath[k] = foundPath[k];
        }
      }

#elif defined(__APPLE__)
      // https://developer.apple.com/library/mac/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/VersionInformation.html
      // Macintosh application bundles have the option of embedding dependent frameworks within the
      // application
      // bundle itself. A problem with that is that it doesn't support vendor-supplied updates to
      // the framework.
      printfResult =
          snprintf(libraryPath, libraryPathCapacity, "%sLibOVRRT.dylib", directoryArray[i]);

#else // Unix
      // Applications that depend on the OS (e.g. ld-linux / ldd) can rely on the library being in a
      // common location
      // such as /usr/lib or can rely on the -rpath linker option to embed a path for the OS to
      // check for the library,
      // or can rely on the LD_LIBRARY_PATH environment variable being set. It's generally not
      // recommended that applications
      // depend on LD_LIBRARY_PATH be globally modified, partly due to potentialy security issues.
      // Currently we check the current application directory, current working directory, and then
      // /usr/lib and possibly others.
      printfResult = snprintf(
          libraryPath,
          libraryPathCapacity,
          "%slibOVRRT%s.so.%d",
          directoryArray[i],
          pBitDepth,
          requestedMajorVersion);
#endif

      if ((printfResult >= 0) && (printfResult < (int)libraryPathCapacity)) {
        moduleHandle = OVR_OpenLibrary(libraryPath, result);
        if (moduleHandle != ModuleHandleTypeNull)
          return moduleHandle;
      }
    }
  }

  return moduleHandle;
}
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** hLibOVR\r
+//\r
+// global handle to the LivOVR shared library.\r
+//\r
+static ModuleHandleType hLibOVR = NULL;\r
+\r
+// This function is currently unsupported.\r
+ModuleHandleType ovr_GetLibOVRRTHandle() {\r
+ return hLibOVR;\r
+}\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** Function declarations\r
+//\r
+\r
+//-----------------------------------------------------------------------------------\r
+// ***** OVR_DECLARE_IMPORT\r
+//\r
+// Creates a pointer and loader value union for each entry in OVR_LIST_APIS()\r
+//\r
+\r
// Declares, for one API list entry, a union of the typed function pointer
// (Ptr) and the raw symbol address (Symbol), so the loader can store the
// result of OVR_DLSYM without casting between pointer types.
#define OVR_DECLARE_IMPORT(ReturnValue, FunctionName, OptionalVersion, Arguments) \
  union {                                                                         \
    ReturnValue(OVR_CDECL* Ptr) Arguments;                                        \
    ModuleFunctionType Symbol;                                                    \
  } FunctionName;

// Expansion used for API list entries that should not generate an import slot.
#define OVR_IGNORE_IMPORT(ReturnValue, FunctionName, OptionalVersion, Arguments)

//-----------------------------------------------------------------------------------
// ***** API - a structure with each API entrypoint as a FunctionName.Ptr and FunctionName.Symbol
// union
//

static struct { OVR_LIST_APIS(OVR_DECLARE_IMPORT, OVR_IGNORE_IMPORT) } API = {{NULL}};
+\r
+static void OVR_UnloadSharedLibrary() {\r
+ memset(&API, 0, sizeof(API));\r
+ if (hLibOVR)\r
+ OVR_CloseLibrary(hLibOVR);\r
+ hLibOVR = NULL;\r
+}\r
+\r
// Locates and loads the LibOVRRT shared library matching the requested
// product/major version, then resolves every API.* entry point. Returns
// ovrSuccess (also when already loaded), or an ovrError_Lib* code; on any
// symbol-resolution failure the library is fully unloaded again.
static ovrResult OVR_LoadSharedLibrary(int requestedProductVersion, int requestedMajorVersion) {
  FilePathCharType filePath[OVR_MAX_PATH];
  const char* SymbolName = NULL;
  ovrResult result = ovrSuccess;

  // Idempotent: nothing to do if the library is already loaded and bound.
  if (hLibOVR)
    return result;

  hLibOVR = OVR_FindLibraryPath(
      requestedProductVersion,
      requestedMajorVersion,
      filePath,
      sizeof(filePath) / sizeof(filePath[0]),
      &result);

  if (!hLibOVR)
    return result;

  // Zero the API table just to be paranoid
  memset(&API, 0, sizeof(API));

// Load the current API entrypoint using the catenated FunctionName and OptionalVersion
#define OVR_GETFUNCTION(ReturnValue, FunctionName, OptionalVersion, Arguments) \
  SymbolName = #FunctionName #OptionalVersion;                                 \
  API.FunctionName.Symbol = OVR_DLSYM(hLibOVR, SymbolName);                    \
  if (!API.FunctionName.Symbol) {                                              \
    fprintf(stderr, "Unable to locate symbol: %s\n", SymbolName);              \
    result = ovrError_LibSymbols;                                              \
    goto FailedToLoadSymbol;                                                   \
  }

  OVR_LIST_APIS(OVR_GETFUNCTION, OVR_IGNORE_IMPORT)

#undef OVR_GETFUNCTION

  return result;

FailedToLoadSymbol:
  // Check SymbolName for the name of the API which failed to load
  OVR_UnloadSharedLibrary();
  return result;
}
+\r
+// These defaults are also in CAPI.cpp\r
+static const ovrInitParams DefaultParams = {\r
+ ovrInit_RequestVersion, // Flags\r
+ OVR_MINOR_VERSION, // RequestedMinorVersion\r
+ 0, // LogCallback\r
+ 0, // UserData\r
+ 0, // ConnectionTimeoutSeconds\r
+ OVR_ON64("") // pad0\r
+};\r
+\r
+// Don't put this on the heap\r
+static ovrErrorInfo LastInitializeErrorInfo = {ovrError_NotInitialized,\r
+ "ovr_Initialize never called"};\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* inputParams) {\r
+ ovrResult result;\r
+ ovrInitParams params;\r
+\r
+ typedef void(OVR_CDECL * ovr_ReportClientInfoType)(\r
+ unsigned int compilerVersion,\r
+ int productVersion,\r
+ int majorVersion,\r
+ int minorVersion,\r
+ int patchVersion,\r
+ int buildNumber);\r
+ ovr_ReportClientInfoType reportClientInfo;\r
+\r
+ // Do something with our version signature hash to prevent\r
+ // it from being optimized out. In this case, compute\r
+ // a cheap CRC.\r
+ uint8_t crc = 0;\r
+ size_t i;\r
+\r
+ for (i = 0; i < (sizeof(OculusSDKUniqueIdentifier) - 3);\r
+ ++i) // Minus 3 because we have trailing OVR_MAJOR_VERSION, OVR_MINOR_VERSION,\r
+ // OVR_PATCH_VERSION which vary per version.\r
+ {\r
+ crc ^= OculusSDKUniqueIdentifier[i];\r
+ }\r
+\r
+ assert(crc == OculusSDKUniqueIdentifierXORResult);\r
+ if (crc != OculusSDKUniqueIdentifierXORResult) {\r
+ return ovrError_Initialize;\r
+ }\r
+\r
+ if (!inputParams) {\r
+ params = DefaultParams;\r
+ } else {\r
+ params = *inputParams;\r
+\r
+ // If not requesting a particular minor version,\r
+ if (!(params.Flags & ovrInit_RequestVersion)) {\r
+ // Enable requesting the default minor version.\r
+ params.Flags |= ovrInit_RequestVersion;\r
+ params.RequestedMinorVersion = OVR_MINOR_VERSION;\r
+ }\r
+ }\r
+\r
+ // Clear non-writable bits provided by client code.\r
+ params.Flags &= ovrinit_WritableBits;\r
+\r
+\r
+\r
+ // Error out if the requested minor version is less than our lowest deemed compatible version\r
+ // denoted by OVR_MIN_REQUESTABLE_MINOR_VERSION.\r
+ // Note: This code has to be in the shim as we want to enforce usage of the new API versions for\r
+ // applications being recompiled while maintaining backwards compatibility with older apps\r
+ if (params.RequestedMinorVersion < OVR_MIN_REQUESTABLE_MINOR_VERSION) {\r
+ // Requested LibOVRRT version too low\r
+ result = ovrError_LibVersion;\r
+ return result;\r
+ }\r
+\r
+ // By design we ignore the build version in the library search.\r
+ result = OVR_LoadSharedLibrary(OVR_PRODUCT_VERSION, OVR_MAJOR_VERSION);\r
+ if (result != ovrSuccess)\r
+ return result;\r
+\r
+ result = API.ovr_Initialize.Ptr(¶ms);\r
+\r
+ if (result != ovrSuccess) {\r
+ // Stash the last initialization error for the shim to return if\r
+ // ovr_GetLastErrorInfo is called after we unload the dll below\r
+ if (API.ovr_GetLastErrorInfo.Ptr) {\r
+ API.ovr_GetLastErrorInfo.Ptr(&LastInitializeErrorInfo);\r
+ }\r
+ OVR_UnloadSharedLibrary();\r
+ }\r
+\r
+ reportClientInfo =\r
+ (ovr_ReportClientInfoType)(uintptr_t)OVR_DLSYM(hLibOVR, "ovr_ReportClientInfo");\r
+\r
+ if (reportClientInfo) {\r
+ unsigned int mscFullVer = 0;\r
+#if defined(_MSC_FULL_VER)\r
+ mscFullVer = _MSC_FULL_VER;\r
+#endif // _MSC_FULL_VER\r
+\r
+ reportClientInfo(\r
+ mscFullVer,\r
+ OVR_PRODUCT_VERSION,\r
+ OVR_MAJOR_VERSION,\r
+ OVR_MINOR_VERSION,\r
+ OVR_PATCH_VERSION,\r
+ OVR_BUILD_NUMBER);\r
+ }\r
+\r
+ return result;\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_Shutdown() {\r
+ if (!API.ovr_Shutdown.Ptr)\r
+ return;\r
+ API.ovr_Shutdown.Ptr();\r
+ OVR_UnloadSharedLibrary();\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString() {\r
+ // We don't directly return the value of the DLL API.ovr_GetVersionString.Ptr call,\r
+ // because that call returns a pointer to memory within the DLL. If the DLL goes\r
+ // away then that pointer becomes invalid while the process may still be holding\r
+ // onto it. So we save a local copy of it which is always valid.\r
+ static char dllVersionStringLocal[32];\r
+ const char* dllVersionString;\r
+\r
+ if (!API.ovr_GetVersionString.Ptr)\r
+ return "(Unable to load LibOVR)";\r
+\r
+ dllVersionString = API.ovr_GetVersionString.Ptr(); // Guaranteed to always be valid.\r
+ assert(dllVersionString != NULL);\r
+ OVR_strlcpy(dllVersionStringLocal, dllVersionString, sizeof(dllVersionStringLocal));\r
+\r
+ return dllVersionStringLocal;\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo) {\r
+ if (!API.ovr_GetLastErrorInfo.Ptr) {\r
+ *errorInfo = LastInitializeErrorInfo;\r
+ } else\r
+ API.ovr_GetLastErrorInfo.Ptr(errorInfo);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session) {\r
+ if (!API.ovr_GetHmdDesc.Ptr) {\r
+ ovrHmdDesc hmdDesc;\r
+ memset(&hmdDesc, 0, sizeof(hmdDesc));\r
+ hmdDesc.Type = ovrHmd_None;\r
+ return hmdDesc;\r
+ }\r
+\r
+ return API.ovr_GetHmdDesc.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session) {\r
+ if (!API.ovr_GetTrackerCount.Ptr) {\r
+ return 0;\r
+ }\r
+\r
+ return API.ovr_GetTrackerCount.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTrackerDesc)\r
+ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex) {\r
+ if (!API.ovr_GetTrackerDesc.Ptr) {\r
+ ovrTrackerDesc trackerDesc;\r
+ memset(&trackerDesc, 0, sizeof(trackerDesc));\r
+ return trackerDesc;\r
+ }\r
+\r
+ return API.ovr_GetTrackerDesc.Ptr(session, trackerDescIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid) {\r
+ if (!API.ovr_Create.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_Create.Ptr(pSession, pLuid);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session) {\r
+ if (!API.ovr_Destroy.Ptr)\r
+ return;\r
+ API.ovr_Destroy.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus) {\r
+ if (!API.ovr_GetSessionStatus.Ptr) {\r
+ if (sessionStatus) {\r
+ sessionStatus->IsVisible = ovrFalse;\r
+ sessionStatus->HmdPresent = ovrFalse;\r
+ sessionStatus->HmdMounted = ovrFalse;\r
+ sessionStatus->ShouldQuit = ovrFalse;\r
+ sessionStatus->DisplayLost = ovrFalse;\r
+ sessionStatus->ShouldRecenter = ovrFalse;\r
+ sessionStatus->HasInputFocus = ovrFalse;\r
+ sessionStatus->OverlayPresent = ovrFalse;\r
+ sessionStatus->DepthRequested = ovrFalse;\r
+ }\r
+\r
+ return ovrError_NotInitialized;\r
+ }\r
+\r
+ return API.ovr_GetSessionStatus.Ptr(session, sessionStatus);\r
+}\r
+\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_IsExtensionSupported(ovrSession session, ovrExtensions extension, ovrBool* extensionSupported) {\r
+ if (!API.ovr_IsExtensionSupported.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_IsExtensionSupported.Ptr(session, extension, extensionSupported);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_EnableExtension(ovrSession session, ovrExtensions extension) {\r
+ if (!API.ovr_EnableExtension.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_EnableExtension.Ptr(session, extension);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin) {\r
+ if (!API.ovr_SetTrackingOriginType.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_SetTrackingOriginType.Ptr(session, origin);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session) {\r
+ if (!API.ovr_GetTrackingOriginType.Ptr)\r
+ return ovrTrackingOrigin_EyeLevel;\r
+ return API.ovr_GetTrackingOriginType.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session) {\r
+ if (!API.ovr_RecenterTrackingOrigin.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_RecenterTrackingOrigin.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SpecifyTrackingOrigin(ovrSession session, ovrPosef originPose) {\r
+ if (!API.ovr_SpecifyTrackingOrigin.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_SpecifyTrackingOrigin.Ptr(session, originPose);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session) {\r
+ if (!API.ovr_ClearShouldRecenterFlag.Ptr)\r
+ return;\r
+ API.ovr_ClearShouldRecenterFlag.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTrackingState)\r
+ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker) {\r
+ if (!API.ovr_GetTrackingState.Ptr) {\r
+ ovrTrackingState nullTrackingState;\r
+ memset(&nullTrackingState, 0, sizeof(nullTrackingState));\r
+ return nullTrackingState;\r
+ }\r
+\r
+ return API.ovr_GetTrackingState.Ptr(session, absTime, latencyMarker);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetDevicePoses(\r
+ ovrSession session,\r
+ ovrTrackedDeviceType* deviceTypes,\r
+ int deviceCount,\r
+ double absTime,\r
+ ovrPoseStatef* outDevicePoses) {\r
+ if (!API.ovr_GetDevicePoses.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_GetDevicePoses.Ptr(session, deviceTypes, deviceCount, absTime, outDevicePoses);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTrackingState)\r
+ovr_GetTrackingStateWithSensorData(\r
+ ovrSession session,\r
+ double absTime,\r
+ ovrBool latencyMarker,\r
+ ovrSensorData* sensorData) {\r
+ if (!API.ovr_GetTrackingStateWithSensorData.Ptr) {\r
+ ovrTrackingState nullTrackingState;\r
+ memset(&nullTrackingState, 0, sizeof(nullTrackingState));\r
+ if (sensorData)\r
+ memset(&sensorData, 0, sizeof(sensorData));\r
+ return nullTrackingState;\r
+ }\r
+\r
+ return API.ovr_GetTrackingStateWithSensorData.Ptr(session, absTime, latencyMarker, sensorData);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTrackerPose)\r
+ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex) {\r
+ if (!API.ovr_GetTrackerPose.Ptr) {\r
+ ovrTrackerPose nullTrackerPose;\r
+ memset(&nullTrackerPose, 0, sizeof(nullTrackerPose));\r
+ return nullTrackerPose;\r
+ }\r
+\r
+ return API.ovr_GetTrackerPose.Ptr(session, trackerPoseIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState) {\r
+ if (!API.ovr_GetInputState.Ptr) {\r
+ if (inputState)\r
+ memset(inputState, 0, sizeof(ovrInputState));\r
+ return ovrError_NotInitialized;\r
+ }\r
+ return API.ovr_GetInputState.Ptr(session, controllerType, inputState);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session) {\r
+ if (!API.ovr_GetConnectedControllerTypes.Ptr) {\r
+ return 0;\r
+ }\r
+ return API.ovr_GetConnectedControllerTypes.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTouchHapticsDesc)\r
+ovr_GetTouchHapticsDesc(ovrSession session, ovrControllerType controllerType) {\r
+ if (!API.ovr_GetTouchHapticsDesc.Ptr) {\r
+ ovrTouchHapticsDesc nullDesc;\r
+ memset(&nullDesc, 0, sizeof(nullDesc));\r
+ return nullDesc;\r
+ }\r
+\r
+ return API.ovr_GetTouchHapticsDesc.Ptr(session, controllerType);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetControllerVibration(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ float frequency,\r
+ float amplitude) {\r
+ if (!API.ovr_SetControllerVibration.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_SetControllerVibration.Ptr(session, controllerType, frequency, amplitude);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SubmitControllerVibration(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ const ovrHapticsBuffer* buffer) {\r
+ if (!API.ovr_SubmitControllerVibration.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_SubmitControllerVibration.Ptr(session, controllerType, buffer);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetControllerVibrationState(\r
+ ovrSession session,\r
+ ovrControllerType controllerType,\r
+ ovrHapticsPlaybackState* outState) {\r
+ if (!API.ovr_GetControllerVibrationState.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetControllerVibrationState.Ptr(session, controllerType, outState);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_TestBoundary(\r
+ ovrSession session,\r
+ ovrTrackedDeviceType deviceBitmask,\r
+ ovrBoundaryType singleBoundaryType,\r
+ ovrBoundaryTestResult* outTestResult) {\r
+ if (!API.ovr_TestBoundary.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_TestBoundary.Ptr(session, deviceBitmask, singleBoundaryType, outTestResult);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_TestBoundaryPoint(\r
+ ovrSession session,\r
+ const ovrVector3f* point,\r
+ ovrBoundaryType singleBoundaryType,\r
+ ovrBoundaryTestResult* outTestResult) {\r
+ if (!API.ovr_TestBoundaryPoint.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_TestBoundaryPoint.Ptr(session, point, singleBoundaryType, outTestResult);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetBoundaryLookAndFeel(ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel) {\r
+ if (!API.ovr_SetBoundaryLookAndFeel.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_SetBoundaryLookAndFeel.Ptr(session, lookAndFeel);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetBoundaryLookAndFeel(ovrSession session) {\r
+ if (!API.ovr_ResetBoundaryLookAndFeel.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_ResetBoundaryLookAndFeel.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetBoundaryGeometry(\r
+ ovrSession session,\r
+ ovrBoundaryType singleBoundaryType,\r
+ ovrVector3f* outFloorPoints,\r
+ int* outFloorPointsCount) {\r
+ if (!API.ovr_GetBoundaryGeometry.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetBoundaryGeometry.Ptr(\r
+ session, singleBoundaryType, outFloorPoints, outFloorPointsCount);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetBoundaryDimensions(\r
+ ovrSession session,\r
+ ovrBoundaryType singleBoundaryType,\r
+ ovrVector3f* outDimensions) {\r
+ if (!API.ovr_GetBoundaryDimensions.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetBoundaryDimensions.Ptr(session, singleBoundaryType, outDimensions);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetBoundaryVisible(ovrSession session, ovrBool* outIsVisible) {\r
+ if (!API.ovr_GetBoundaryVisible.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetBoundaryVisible.Ptr(session, outIsVisible);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RequestBoundaryVisible(ovrSession session, ovrBool visible) {\r
+ if (!API.ovr_RequestBoundaryVisible.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_RequestBoundaryVisible.Ptr(session, visible);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrSizei)\r
+ovr_GetFovTextureSize(\r
+ ovrSession session,\r
+ ovrEyeType eye,\r
+ ovrFovPort fov,\r
+ float pixelsPerDisplayPixel) {\r
+ if (!API.ovr_GetFovTextureSize.Ptr) {\r
+ ovrSizei nullSize;\r
+ memset(&nullSize, 0, sizeof(nullSize));\r
+ return nullSize;\r
+ }\r
+\r
+ return API.ovr_GetFovTextureSize.Ptr(session, eye, fov, pixelsPerDisplayPixel);\r
+}\r
+\r
+#if defined(_WIN32)\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateTextureSwapChainDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrTextureSwapChainDesc* desc,\r
+ ovrTextureSwapChain* outTextureSet) {\r
+ if (!API.ovr_CreateTextureSwapChainDX.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateTextureSwapChainDX.Ptr(session, d3dPtr, desc, outTextureSet);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* outMirrorTexture) {\r
+ if (!API.ovr_CreateMirrorTextureDX.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateMirrorTextureDX.Ptr(session, d3dPtr, desc, outMirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureWithOptionsDX(\r
+ ovrSession session,\r
+ IUnknown* d3dPtr,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* outMirrorTexture) {\r
+ if (!API.ovr_CreateMirrorTextureWithOptionsDX.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateMirrorTextureWithOptionsDX.Ptr(session, d3dPtr, desc, outMirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainBufferDX(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int index,\r
+ IID iid,\r
+ void** ppObject) {\r
+ if (!API.ovr_GetTextureSwapChainBufferDX.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainBufferDX.Ptr(session, chain, index, iid, ppObject);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetMirrorTextureBufferDX(\r
+ ovrSession session,\r
+ ovrMirrorTexture mirror,\r
+ IID iid,\r
+ void** ppObject) {\r
+ if (!API.ovr_GetMirrorTextureBufferDX.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetMirrorTextureBufferDX.Ptr(session, mirror, iid, ppObject);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(unsigned int* deviceOutId) {\r
+ if (!API.ovr_GetAudioDeviceOutWaveId.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceOutWaveId.Ptr(deviceOutId);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(unsigned int* deviceInId) {\r
+ if (!API.ovr_GetAudioDeviceInWaveId.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceInWaveId.Ptr(deviceInId);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuidStr(WCHAR* deviceOutStrBuffer) {\r
+ if (!API.ovr_GetAudioDeviceOutGuidStr.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceOutGuidStr.Ptr(deviceOutStrBuffer);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid) {\r
+ if (!API.ovr_GetAudioDeviceOutGuid.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceOutGuid.Ptr(deviceOutGuid);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuidStr(WCHAR* deviceInStrBuffer) {\r
+ if (!API.ovr_GetAudioDeviceInGuidStr.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceInGuidStr.Ptr(deviceInStrBuffer);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid) {\r
+ if (!API.ovr_GetAudioDeviceInGuid.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetAudioDeviceInGuid.Ptr(deviceInGuid);\r
+}\r
+\r
+#endif\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateTextureSwapChainGL(\r
+ ovrSession session,\r
+ const ovrTextureSwapChainDesc* desc,\r
+ ovrTextureSwapChain* outTextureSet) {\r
+ if (!API.ovr_CreateTextureSwapChainGL.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateTextureSwapChainGL.Ptr(session, desc, outTextureSet);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureGL(\r
+ ovrSession session,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* outMirrorTexture) {\r
+ if (!API.ovr_CreateMirrorTextureGL.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateMirrorTextureGL.Ptr(session, desc, outMirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureWithOptionsGL(\r
+ ovrSession session,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* outMirrorTexture) {\r
+ if (!API.ovr_CreateMirrorTextureWithOptionsGL.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateMirrorTextureWithOptionsGL.Ptr(session, desc, outMirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainBufferGL(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int index,\r
+ unsigned int* texId) {\r
+ if (!API.ovr_GetTextureSwapChainBufferGL.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainBufferGL.Ptr(session, chain, index, texId);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetMirrorTextureBufferGL(ovrSession session, ovrMirrorTexture mirror, unsigned int* texId) {\r
+ if (!API.ovr_GetMirrorTextureBufferGL.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetMirrorTextureBufferGL.Ptr(session, mirror, texId);\r
+}\r
+\r
+#if !defined(OSX_UNIMPLEMENTED)\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetInstanceExtensionsVk(\r
+ ovrGraphicsLuid luid,\r
+ char* extensionNames,\r
+ uint32_t* inoutExtensionNamesSize) {\r
+ if (!API.ovr_GetInstanceExtensionsVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetInstanceExtensionsVk.Ptr(luid, extensionNames, inoutExtensionNamesSize);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetDeviceExtensionsVk(\r
+ ovrGraphicsLuid luid,\r
+ char* extensionNames,\r
+ uint32_t* inoutExtensionNamesSize) {\r
+ if (!API.ovr_GetDeviceExtensionsVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetDeviceExtensionsVk.Ptr(luid, extensionNames, inoutExtensionNamesSize);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetSessionPhysicalDeviceVk(\r
+ ovrSession session,\r
+ ovrGraphicsLuid luid,\r
+ VkInstance instance,\r
+ VkPhysicalDevice* out_physicalDevice) {\r
+ if (!API.ovr_GetSessionPhysicalDeviceVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetSessionPhysicalDeviceVk.Ptr(session, luid, instance, out_physicalDevice);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetSynchronizationQueueVk(ovrSession session, VkQueue queue) {\r
+ if (!API.ovr_SetSynchronizationQueueVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_SetSynchronizationQueueVk.Ptr(session, queue);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateTextureSwapChainVk(\r
+ ovrSession session,\r
+ VkDevice device,\r
+ const ovrTextureSwapChainDesc* desc,\r
+ ovrTextureSwapChain* out_TextureSwapChain) {\r
+ if (!API.ovr_CreateTextureSwapChainVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateTextureSwapChainVk.Ptr(session, device, desc, out_TextureSwapChain);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainBufferVk(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int index,\r
+ VkImage* out_Image) {\r
+ if (!API.ovr_GetTextureSwapChainBufferVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainBufferVk.Ptr(session, chain, index, out_Image);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CreateMirrorTextureWithOptionsVk(\r
+ ovrSession session,\r
+ VkDevice device,\r
+ const ovrMirrorTextureDesc* desc,\r
+ ovrMirrorTexture* out_MirrorTexture) {\r
+ if (!API.ovr_CreateMirrorTextureWithOptionsVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CreateMirrorTextureWithOptionsVk.Ptr(session, device, desc, out_MirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetMirrorTextureBufferVk(\r
+ ovrSession session,\r
+ ovrMirrorTexture mirrorTexture,\r
+ VkImage* out_Image) {\r
+ if (!API.ovr_GetMirrorTextureBufferVk.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetMirrorTextureBufferVk.Ptr(session, mirrorTexture, out_Image);\r
+}\r
+#endif // OSX_UNIMPLEMENTED\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* length) {\r
+ if (!API.ovr_GetTextureSwapChainLength.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainLength.Ptr(session, chain, length);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainCurrentIndex(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ int* currentIndex) {\r
+ if (!API.ovr_GetTextureSwapChainCurrentIndex.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainCurrentIndex.Ptr(session, chain, currentIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetTextureSwapChainDesc(\r
+ ovrSession session,\r
+ ovrTextureSwapChain chain,\r
+ ovrTextureSwapChainDesc* desc) {\r
+ if (!API.ovr_GetTextureSwapChainDesc.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetTextureSwapChainDesc.Ptr(session, chain, desc);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain) {\r
+ if (!API.ovr_CommitTextureSwapChain.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_CommitTextureSwapChain.Ptr(session, chain);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain) {\r
+ if (!API.ovr_DestroyTextureSwapChain.Ptr)\r
+ return;\r
+\r
+ API.ovr_DestroyTextureSwapChain.Ptr(session, chain);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture) {\r
+ if (!API.ovr_DestroyMirrorTexture.Ptr)\r
+ return;\r
+\r
+ API.ovr_DestroyMirrorTexture.Ptr(session, mirrorTexture);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_WaitToBeginFrame(ovrSession session, long long frameIndex) {\r
+ if (!API.ovr_WaitToBeginFrame.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_WaitToBeginFrame.Ptr(session, frameIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_BeginFrame(ovrSession session, long long frameIndex) {\r
+ if (!API.ovr_BeginFrame.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_BeginFrame.Ptr(session, frameIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_EndFrame(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ const ovrViewScaleDesc* viewScaleDesc,\r
+ ovrLayerHeader const* const* layerPtrList,\r
+ unsigned int layerCount) {\r
+ if (!API.ovr_EndFrame.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_EndFrame.Ptr(session, frameIndex, viewScaleDesc, layerPtrList, layerCount);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SubmitFrame(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ const ovrViewScaleDesc* viewScaleDesc,\r
+ ovrLayerHeader const* const* layerPtrList,\r
+ unsigned int layerCount) {\r
+ if (!API.ovr_SubmitFrame.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_SubmitFrame.Ptr(session, frameIndex, viewScaleDesc, layerPtrList, layerCount);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc)\r
+ovr_GetRenderDesc(ovrSession session, ovrEyeType eyeType, ovrFovPort fov) {\r
+ if (!API.ovr_GetRenderDesc.Ptr) {\r
+ ovrEyeRenderDesc nullEyeRenderDesc;\r
+ memset(&nullEyeRenderDesc, 0, sizeof(nullEyeRenderDesc));\r
+ return nullEyeRenderDesc;\r
+ }\r
+ return API.ovr_GetRenderDesc.Ptr(session, eyeType, fov);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetPerfStats(ovrSession session, ovrPerfStats* outPerfStats) {\r
+ if (!API.ovr_GetPerfStats.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_GetPerfStats.Ptr(session, outPerfStats);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetPerfStats(ovrSession session) {\r
+ if (!API.ovr_ResetPerfStats.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_ResetPerfStats.Ptr(session);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex) {\r
+ if (!API.ovr_GetPredictedDisplayTime.Ptr)\r
+ return 0.0;\r
+\r
+ return API.ovr_GetPredictedDisplayTime.Ptr(session, frameIndex);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds() {\r
+ if (!API.ovr_GetTimeInSeconds.Ptr)\r
+ return 0.;\r
+ return API.ovr_GetTimeInSeconds.Ptr();\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal) {\r
+ if (!API.ovr_GetBool.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_GetBool.Ptr(session, propertyName, defaultVal);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value) {\r
+ if (!API.ovr_SetBool.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_SetBool.Ptr(session, propertyName, value);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal) {\r
+ if (!API.ovr_GetInt.Ptr)\r
+ return 0;\r
+ return API.ovr_GetInt.Ptr(session, propertyName, defaultVal);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value) {\r
+ if (!API.ovr_SetInt.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_SetInt.Ptr(session, propertyName, value);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(float)\r
+ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal) {\r
+ if (!API.ovr_GetFloat.Ptr)\r
+ return 0.f;\r
+ return API.ovr_GetFloat.Ptr(session, propertyName, defaultVal);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetFloat(ovrSession session, const char* propertyName, float value) {\r
+ if (!API.ovr_SetFloat.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_SetFloat.Ptr(session, propertyName, value);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(unsigned int)\r
+ovr_GetFloatArray(\r
+ ovrSession session,\r
+ const char* propertyName,\r
+ float values[],\r
+ unsigned int arraySize) {\r
+ if (!API.ovr_GetFloatArray.Ptr)\r
+ return 0;\r
+ return API.ovr_GetFloatArray.Ptr(session, propertyName, values, arraySize);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetFloatArray(\r
+ ovrSession session,\r
+ const char* propertyName,\r
+ const float values[],\r
+ unsigned int arraySize) {\r
+ if (!API.ovr_SetFloatArray.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_SetFloatArray.Ptr(session, propertyName, values, arraySize);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(const char*)\r
+ovr_GetString(ovrSession session, const char* propertyName, const char* defaultVal) {\r
+ if (!API.ovr_GetString.Ptr)\r
+ return "(Unable to load LibOVR)";\r
+ return API.ovr_GetString.Ptr(session, propertyName, defaultVal);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrBool)\r
+ovr_SetString(ovrSession session, const char* propertyName, const char* value) {\r
+ if (!API.ovr_SetString.Ptr)\r
+ return ovrFalse;\r
+ return API.ovr_SetString.Ptr(session, propertyName, value);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message) {\r
+ if (!API.ovr_TraceMessage.Ptr)\r
+ return -1;\r
+\r
+ return API.ovr_TraceMessage.Ptr(level, message);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_IdentifyClient(const char* identity) {\r
+ if (!API.ovr_IdentifyClient.Ptr)\r
+ return ovrError_NotInitialized;\r
+\r
+ return API.ovr_IdentifyClient.Ptr(identity);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Lookup(const char* name, void** data) {\r
+ if (!API.ovr_Lookup.Ptr)\r
+ return ovrError_NotInitialized;\r
+ return API.ovr_Lookup.Ptr(name, data);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GetExternalCameras(\r
+ ovrSession session,\r
+ ovrExternalCamera* outCameras,\r
+ unsigned int* outCameraCount) {\r
+ if (!API.ovr_GetExternalCameras.Ptr)\r
+ return ovrError_NotInitialized;\r
+ if (!outCameras || !outCameraCount)\r
+ return ovrError_InvalidParameter;\r
+\r
+ return API.ovr_GetExternalCameras.Ptr(session, outCameras, outCameraCount);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_SetExternalCameraProperties(\r
+ ovrSession session,\r
+ const char* name,\r
+ const ovrCameraIntrinsics* const intrinsics,\r
+ const ovrCameraExtrinsics* const extrinsics) {\r
+ if (!API.ovr_SetExternalCameraProperties.Ptr)\r
+ return ovrError_NotInitialized;\r
+ if (!name || (!intrinsics && !extrinsics))\r
+ return ovrError_InvalidParameter;\r
+\r
+ return API.ovr_SetExternalCameraProperties.Ptr(session, name, intrinsics, extrinsics);\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning(pop)\r
+#endif\r
--- /dev/null
+/********************************************************************************/ /**\r
+ \file OVR_CAPI_Prototypes.h\r
+ \brief Internal CAPI prototype listing macros\r
+ \copyright Copyright 2016 Oculus VR, LLC. All Rights reserved.\r
+ ************************************************************************************/\r
+\r
+#ifndef OVR_CAPI_Prototypes_h\r
+#define OVR_CAPI_Prototypes_h\r
+\r
+#include "OVR_CAPI.h"\r
+\r
+\r
+//\r
+// OVR_LIST_*_APIS - apply passed in macros to a list of API entrypoints\r
+//\r
+// The _ macro argument is applied for all current API versions\r
+// The X macro argument is applied for back-compat API versions\r
+//\r
+// The tuple passed to either macro is (ReturnType, FunctionName, OptionalVersion, ParameterList)\r
+//\r
+\r
+\r
+// clang-format off\r
+\r
// OVR_LIST_PUBLIC_APIS: every publicly exported CAPI entry point.
// "_" rows are current-version entry points; "X" rows are kept only for
// binary back-compat with older runtimes (e.g. the pre-1.17 struct variants
// of ovr_SubmitFrame / ovr_GetRenderDesc, whose current versions carry the
// explicit version suffix "2" in the third tuple field).
// NOTE: comments cannot be placed inside the macro bodies below — a //
// comment before a line-continuation backslash would swallow it.
#define OVR_LIST_PUBLIC_APIS(_,X) \
X(ovrBool, ovr_InitializeRenderingShimVersion, , (int requestedMinorVersion)) \
_(ovrResult, ovr_Initialize, , (const ovrInitParams* params)) \
_(void, ovr_Shutdown, , (void)) \
_(const char*, ovr_GetVersionString, , (void)) \
_(void, ovr_GetLastErrorInfo, , (ovrErrorInfo* errorInfo)) \
_(ovrHmdDesc, ovr_GetHmdDesc, , (ovrSession session)) \
_(unsigned int, ovr_GetTrackerCount, , (ovrSession session)) \
_(ovrTrackerDesc, ovr_GetTrackerDesc, , (ovrSession session, unsigned int trackerDescIndex)) \
_(ovrResult, ovr_Create, , (ovrSession* pSession, ovrGraphicsLuid* pLuid)) \
_(void, ovr_Destroy, , (ovrSession session)) \
_(ovrResult, ovr_GetSessionStatus, , (ovrSession session, ovrSessionStatus* sessionStatus)) \
_(ovrResult, ovr_IsExtensionSupported, , (ovrSession session, ovrExtensions extension, ovrBool* outExtensionSupported)) \
_(ovrResult, ovr_EnableExtension, , (ovrSession session, ovrExtensions extension)) \
_(ovrResult, ovr_SetTrackingOriginType, , (ovrSession session, ovrTrackingOrigin origin)) \
_(ovrTrackingOrigin, ovr_GetTrackingOriginType, , (ovrSession session)) \
_(ovrResult, ovr_RecenterTrackingOrigin, , (ovrSession session)) \
_(ovrResult, ovr_SpecifyTrackingOrigin, , (ovrSession session, ovrPosef originPose)) \
_(void, ovr_ClearShouldRecenterFlag, , (ovrSession session)) \
_(ovrTrackingState, ovr_GetTrackingState, , (ovrSession session, double absTime, ovrBool latencyMarker)) \
_(ovrTrackerPose, ovr_GetTrackerPose, , (ovrSession session, unsigned int index)) \
_(ovrResult, ovr_GetInputState, , (ovrSession session, ovrControllerType controllerType, ovrInputState*)) \
_(unsigned int, ovr_GetConnectedControllerTypes, , (ovrSession session)) \
_(ovrSizei, ovr_GetFovTextureSize, , (ovrSession session, ovrEyeType eye, ovrFovPort fov, float pixelsPerDisplayPixel)) \
_(ovrResult, ovr_WaitToBeginFrame, , (ovrSession session, long long frameIndex)) \
_(ovrResult, ovr_BeginFrame, , (ovrSession session, long long frameIndex)) \
_(ovrResult, ovr_EndFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
X(ovrResult, ovr_SubmitFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDescPre117* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
_(ovrResult, ovr_SubmitFrame, 2, (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
X(ovrEyeRenderDescPre117, ovr_GetRenderDesc, , (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \
_(ovrEyeRenderDesc, ovr_GetRenderDesc, 2, (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \
_(double, ovr_GetPredictedDisplayTime, , (ovrSession session, long long frameIndex)) \
_(double, ovr_GetTimeInSeconds, , (void)) \
_(ovrBool, ovr_GetBool, , (ovrSession session, const char* propertyName, ovrBool defaultVal)) \
_(ovrBool, ovr_SetBool, , (ovrSession session, const char* propertyName, ovrBool value)) \
_(int, ovr_GetInt, , (ovrSession session, const char* propertyName, int defaultVal)) \
_(ovrBool, ovr_SetInt, , (ovrSession session, const char* propertyName, int value)) \
_(float, ovr_GetFloat, , (ovrSession session, const char* propertyName, float defaultVal)) \
_(ovrBool, ovr_SetFloat, , (ovrSession session, const char* propertyName, float value)) \
_(unsigned int, ovr_GetFloatArray, , (ovrSession session, const char* propertyName, float values[], unsigned int arraySize)) \
_(ovrBool, ovr_SetFloatArray, , (ovrSession session, const char* propertyName, const float values[], unsigned int arraySize)) \
_(const char*, ovr_GetString, , (ovrSession session, const char* propertyName, const char* defaultVal)) \
_(ovrBool, ovr_SetString, , (ovrSession session, const char* propertyName, const char* value)) \
_(int, ovr_TraceMessage, , (int level, const char* message)) \
_(ovrResult, ovr_IdentifyClient, , (const char* identity)) \
_(ovrResult, ovr_CreateTextureSwapChainGL, , (ovrSession session, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \
_(ovrResult, ovr_CreateMirrorTextureGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_CreateMirrorTextureWithOptionsGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_GetTextureSwapChainBufferGL, , (ovrSession session, ovrTextureSwapChain chain, int index, unsigned int* texId)) \
_(ovrResult, ovr_GetMirrorTextureBufferGL, , (ovrSession session, ovrMirrorTexture mirror, unsigned int* texId)) \
_(ovrResult, ovr_GetTextureSwapChainLength, , (ovrSession session, ovrTextureSwapChain chain, int* length)) \
_(ovrResult, ovr_GetTextureSwapChainCurrentIndex, , (ovrSession session, ovrTextureSwapChain chain, int* currentIndex)) \
_(ovrResult, ovr_GetTextureSwapChainDesc, , (ovrSession session, ovrTextureSwapChain chain, ovrTextureSwapChainDesc* desc)) \
_(ovrResult, ovr_CommitTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \
_(void, ovr_DestroyTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \
_(void, ovr_DestroyMirrorTexture, , (ovrSession session, ovrMirrorTexture texture)) \
X(ovrResult, ovr_SetQueueAheadFraction, , (ovrSession session, float queueAheadFraction)) \
_(ovrResult, ovr_Lookup, , (const char* name, void** data)) \
_(ovrTouchHapticsDesc, ovr_GetTouchHapticsDesc, , (ovrSession session, ovrControllerType controllerType)) \
_(ovrResult, ovr_SetControllerVibration, , (ovrSession session, ovrControllerType controllerType, float frequency, float amplitude)) \
_(ovrResult, ovr_SubmitControllerVibration, , (ovrSession session, ovrControllerType controllerType, const ovrHapticsBuffer* buffer)) \
_(ovrResult, ovr_GetControllerVibrationState, , (ovrSession session, ovrControllerType controllerType, ovrHapticsPlaybackState* outState)) \
_(ovrResult, ovr_TestBoundary, , (ovrSession session, ovrTrackedDeviceType deviceBitmask, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \
_(ovrResult, ovr_TestBoundaryPoint, , (ovrSession session, const ovrVector3f* point, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \
_(ovrResult, ovr_SetBoundaryLookAndFeel, , (ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel)) \
_(ovrResult, ovr_ResetBoundaryLookAndFeel, , (ovrSession session)) \
_(ovrResult, ovr_GetBoundaryGeometry, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outFloorPoints, int* outFloorPointsCount)) \
_(ovrResult, ovr_GetBoundaryDimensions, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outDimension)) \
_(ovrResult, ovr_GetBoundaryVisible, , (ovrSession session, ovrBool* outIsVisible)) \
_(ovrResult, ovr_RequestBoundaryVisible, , (ovrSession session, ovrBool visible)) \
_(ovrResult, ovr_GetPerfStats, , (ovrSession session, ovrPerfStats* outPerfStats)) \
_(ovrResult, ovr_ResetPerfStats, , (ovrSession session))\
_(ovrResult, ovr_GetExternalCameras, , (ovrSession session, ovrExternalCamera* outCameras, unsigned int* outCameraCount))\
_(ovrResult, ovr_SetExternalCameraProperties, , (ovrSession session, const char* name, const ovrCameraIntrinsics* const intrinsics, const ovrCameraExtrinsics* const extrinsics ))\

// OVR_LIST_WIN32_APIS: entry points that exist only in the Windows runtime
// (D3D/audio-device/Vulkan interop). Expands to nothing elsewhere.
// NOTE: the X row ovr_SetSynchonizationQueueVk (sic) preserves a historical
// misspelled export; its correctly spelled "_" replacement follows it.
#if defined (_WIN32)
#define OVR_LIST_WIN32_APIS(_,X) \
 _(ovrResult, ovr_CreateTextureSwapChainDX, , (ovrSession session, IUnknown* d3dPtr, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \
 _(ovrResult, ovr_CreateMirrorTextureDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
 _(ovrResult, ovr_CreateMirrorTextureWithOptionsDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
 _(ovrResult, ovr_GetTextureSwapChainBufferDX, , (ovrSession session, ovrTextureSwapChain chain, int index, IID iid, void** ppObject)) \
 _(ovrResult, ovr_GetMirrorTextureBufferDX, , (ovrSession session, ovrMirrorTexture mirror, IID iid, void** ppObject)) \
 _(ovrResult, ovr_GetAudioDeviceOutWaveId, , (UINT* deviceOutId)) \
 _(ovrResult, ovr_GetAudioDeviceInWaveId, , (UINT* deviceInId)) \
 _(ovrResult, ovr_GetAudioDeviceOutGuidStr, , (WCHAR* deviceOutStrBuffer)) \
 _(ovrResult, ovr_GetAudioDeviceOutGuid, , (GUID* deviceOutGuid)) \
 _(ovrResult, ovr_GetAudioDeviceInGuidStr, , (WCHAR* deviceInStrBuffer)) \
 _(ovrResult, ovr_GetAudioDeviceInGuid, , (GUID* deviceInGuid)) \
 _(ovrResult, ovr_GetInstanceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \
 _(ovrResult, ovr_GetDeviceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \
 _(ovrResult, ovr_GetSessionPhysicalDeviceVk, , (ovrSession session, ovrGraphicsLuid luid, VkInstance instance, VkPhysicalDevice* out_physicalDevice)) \
 X(ovrResult, ovr_SetSynchonizationQueueVk, , (ovrSession session, VkQueue queue)) \
 _(ovrResult, ovr_SetSynchronizationQueueVk, , (ovrSession session, VkQueue queue)) \
 _(ovrResult, ovr_CreateTextureSwapChainVk, , (ovrSession session, VkDevice device, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* out_TextureSwapChain)) \
 _(ovrResult, ovr_GetTextureSwapChainBufferVk, , (ovrSession session, ovrTextureSwapChain chain, int index, VkImage* out_Image)) \
 _(ovrResult, ovr_CreateMirrorTextureWithOptionsVk, , (ovrSession session, VkDevice device, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* out_MirrorTexture)) \
 _(ovrResult, ovr_GetMirrorTextureBufferVk, , (ovrSession session, ovrMirrorTexture mirrorTexture, VkImage* out_Image))
#else
#define OVR_LIST_WIN32_APIS(_,X)
#endif

// Currently no internal-only entry points.
#define OVR_LIST_INTERNAL_APIS(_,X)

// We need to forward declare the ovrSensorData type here, as it won't be in a public OVR_CAPI.h header.
struct ovrSensorData_;
typedef struct ovrSensorData_ ovrSensorData;

// OVR_LIST_PRIVATE_APIS: entry points exported by the runtime but not part of
// the public CAPI surface.
#define OVR_LIST_PRIVATE_APIS(_,X) \
_(ovrTrackingState, ovr_GetTrackingStateWithSensorData, , (ovrSession session, double absTime, ovrBool latencyMarker, ovrSensorData* sensorData)) \
_(ovrResult, ovr_GetDevicePoses, , (ovrSession session, ovrTrackedDeviceType* deviceTypes, int deviceCount, double absTime, ovrPoseStatef* outDevicePoses))

// clang-format on

//
// OVR_LIST_APIS - master list of all API entrypoints
//

#define OVR_LIST_APIS(_, X) \
  OVR_LIST_PUBLIC_APIS(_, X) \
  OVR_LIST_WIN32_APIS(_, X) \
  OVR_LIST_INTERNAL_APIS(_, X) \
  OVR_LIST_PRIVATE_APIS(_, X)
+\r
+#endif // OVR_CAPI_Prototypes_h\r
--- /dev/null
+/************************************************************************************\r
+\r
+PublicHeader: OVR_CAPI_Util.c\r
+Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+\r
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");\r
+you may not use the Oculus VR Rift SDK except in compliance with the License,\r
+which is provided at the time of installation or download, or which\r
+otherwise accompanies this software in either electronic or hard copy form.\r
+\r
+You may obtain a copy of the License at\r
+\r
+http://www.oculusvr.com/licenses/LICENSE-3.3\r
+\r
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK\r
+distributed under the License is distributed on an "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+See the License for the specific language governing permissions and\r
+limitations under the License.\r
+\r
+*************************************************************************************/\r
+\r
#include <Extras/OVR_CAPI_Util.h>
#include <Extras/OVR_StereoProjection.h>

#include <algorithm>
#include <cstring>
#include <limits.h>
#include <memory>
+\r
+#if !defined(_WIN32)\r
+#include <assert.h>\r
+#endif\r
+\r
#if defined(_MSC_VER) && _MSC_VER < 1800 // MSVC < 2013
// Fallback round() (round-half-away-from-zero) for MSVC runtimes that predate
// C99 math support. The whole expansion is parenthesized: without the outer
// parens, a use such as (int)round(x) would bind the cast to the first
// operand of the conditional instead of to the full rounded value.
#define round(dbl) \
  ((dbl) >= 0.0 ? (int)((dbl) + 0.5) \
                : (((dbl) - (double)(int)(dbl)) <= -0.5 ? (int)(dbl) : (int)((dbl)-0.5)))
#endif
+\r
+\r
+#if defined(_MSC_VER)\r
+#include <emmintrin.h>\r
+#pragma intrinsic(_mm_pause)\r
+#endif\r
+\r
+#if defined(_WIN32)\r
+// Prevents <Windows.h> from defining min() and max() macro symbols.\r
+#ifndef NOMINMAX\r
+#define NOMINMAX\r
+#endif\r
+\r
+#include <windows.h>\r
+#endif\r
+\r
+// Used to generate projection from ovrEyeDesc::Fov\r
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)\r
+ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags) {\r
+ bool leftHanded = (projectionModFlags & ovrProjection_LeftHanded) > 0;\r
+ bool flipZ = (projectionModFlags & ovrProjection_FarLessThanNear) > 0;\r
+ bool farAtInfinity = (projectionModFlags & ovrProjection_FarClipAtInfinity) > 0;\r
+ bool isOpenGL = (projectionModFlags & ovrProjection_ClipRangeOpenGL) > 0;\r
+\r
+ // TODO: Pass in correct eye to CreateProjection if we want to support canted displays from CAPI\r
+ return OVR::CreateProjection(\r
+ leftHanded, isOpenGL, fov, OVR::StereoEye_Center, znear, zfar, flipZ, farAtInfinity);\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc)\r
+ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f Projection, unsigned int projectionModFlags) {\r
+ ovrTimewarpProjectionDesc res;\r
+ res.Projection22 = Projection.M[2][2];\r
+ res.Projection23 = Projection.M[2][3];\r
+ res.Projection32 = Projection.M[3][2];\r
+\r
+ if ((res.Projection32 != 1.0f) && (res.Projection32 != -1.0f)) {\r
+ // This is a very strange projection matrix, and probably won't work.\r
+ // If you need it to work, please contact Oculus and let us know your usage scenario.\r
+ }\r
+\r
+ if ((projectionModFlags & ovrProjection_ClipRangeOpenGL) != 0) {\r
+ // Internally we use the D3D range of [0,+w] not the OGL one of [-w,+w], so we need to convert\r
+ // one to the other.\r
+ // Note that the values in the depth buffer, and the actual linear depth we want is the same for\r
+ // both APIs,\r
+ // the difference is purely in the values inside the projection matrix.\r
+\r
+ // D3D does this:\r
+ // depthBuffer = ( ProjD3D.M[2][2] * linearDepth + ProjD3D.M[2][3] ) / ( linearDepth\r
+ // * ProjD3D.M[3][2] );\r
+ // OGL does this:\r
+ // depthBuffer = 0.5 + 0.5 * ( ProjOGL.M[2][2] * linearDepth + ProjOGL.M[2][3] ) / ( linearDepth\r
+ // * ProjOGL.M[3][2] );\r
+\r
+ // Therefore:\r
+ // ProjD3D.M[2][2] = 0.5 * ( ProjOGL.M[2][2] + ProjOGL.M[3][2] );\r
+ // ProjD3D.M[2][3] = 0.5 * ProjOGL.M[2][3];\r
+ // ProjD3D.M[3][2] = ProjOGL.M[3][2];\r
+\r
+ res.Projection22 = 0.5f * (Projection.M[2][2] + Projection.M[3][2]);\r
+ res.Projection23 = 0.5f * Projection.M[2][3];\r
+ res.Projection32 = Projection.M[3][2];\r
+ }\r
+ return res;\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)\r
+ovrMatrix4f_OrthoSubProjection(\r
+ ovrMatrix4f projection,\r
+ ovrVector2f orthoScale,\r
+ float orthoDistance,\r
+ float hmdToEyeOffsetX) {\r
+ ovrMatrix4f ortho;\r
+ // Negative sign is correct!\r
+ // If the eye is offset to the left, then the ortho view needs to be offset to the right relative\r
+ // to the camera.\r
+ float orthoHorizontalOffset = -hmdToEyeOffsetX / orthoDistance;\r
+\r
+ // Current projection maps real-world vector (x,y,1) to the RT.\r
+ // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to\r
+ // the physical [-orthoHalfFov,orthoHalfFov]\r
+ // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means\r
+ // we don't have to feed in Z=1 all the time.\r
+ // The horizontal offset math is a little hinky because the destination is\r
+ // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]\r
+ // So we need to first map [-FovPixels/2,FovPixels/2] to\r
+ // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:\r
+ // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;\r
+ // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;\r
+ // But then we need the same mapping as the existing projection matrix, i.e.\r
+ // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];\r
+ // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] +\r
+ // Projection.M[0][2]; = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +\r
+ // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];\r
+ // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels\r
+ // and offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].\r
+\r
+ ortho.M[0][0] = projection.M[0][0] * orthoScale.x;\r
+ ortho.M[0][1] = 0.0f;\r
+ ortho.M[0][2] = 0.0f;\r
+ ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);\r
+\r
+ ortho.M[1][0] = 0.0f;\r
+ ortho.M[1][1] =\r
+ -projection.M[1][1] * orthoScale.y; /* Note sign flip (text rendering uses Y=down). */\r
+ ortho.M[1][2] = 0.0f;\r
+ ortho.M[1][3] = -projection.M[1][2];\r
+\r
+ ortho.M[2][0] = 0.0f;\r
+ ortho.M[2][1] = 0.0f;\r
+ ortho.M[2][2] = 0.0f;\r
+ ortho.M[2][3] = 0.0f;\r
+\r
+ /* No perspective correction for ortho. */\r
+ ortho.M[3][0] = 0.0f;\r
+ ortho.M[3][1] = 0.0f;\r
+ ortho.M[3][2] = 0.0f;\r
+ ortho.M[3][3] = 1.0f;\r
+\r
+ return ortho;\r
+}\r
+\r
+#undef ovr_CalcEyePoses\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]) {\r
+ if (!hmdToEyeOffset || !outEyePoses) {\r
+ return;\r
+ }\r
+\r
+ using OVR::Posef;\r
+ using OVR::Vector3f;\r
+\r
+ // Currently hmdToEyeOffset is only a 3D vector\r
+ outEyePoses[0] =\r
+ Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[0]));\r
+ outEyePoses[1] =\r
+ Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[1]));\r
+}\r
+\r
+OVR_PRIVATE_FUNCTION(void)\r
+ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef hmdToEyePose[2], ovrPosef outEyePoses[2]) {\r
+ if (!hmdToEyePose || !outEyePoses) {\r
+ return;\r
+ }\r
+\r
+ using OVR::Posef;\r
+ using OVR::Vector3f;\r
+\r
+ outEyePoses[0] = (Posef)headPose * (Posef)hmdToEyePose[0];\r
+ outEyePoses[1] = (Posef)headPose * (Posef)hmdToEyePose[1];\r
+}\r
+\r
+#undef ovr_GetEyePoses\r
+OVR_PUBLIC_FUNCTION(void)\r
+ovr_GetEyePoses(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ ovrBool latencyMarker,\r
+ const ovrVector3f hmdToEyeOffset[2],\r
+ ovrPosef outEyePoses[2],\r
+ double* outSensorSampleTime) {\r
+ double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);\r
+ ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);\r
+ ovr_CalcEyePoses(trackingState.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);\r
+\r
+ if (outSensorSampleTime != nullptr) {\r
+ *outSensorSampleTime = ovr_GetTimeInSeconds();\r
+ }\r
+}\r
+\r
+OVR_PRIVATE_FUNCTION(void)\r
+ovr_GetEyePoses2(\r
+ ovrSession session,\r
+ long long frameIndex,\r
+ ovrBool latencyMarker,\r
+ const ovrPosef hmdToEyePose[2],\r
+ ovrPosef outEyePoses[2],\r
+ double* outSensorSampleTime) {\r
+ double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);\r
+ ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);\r
+ ovr_CalcEyePoses2(trackingState.HeadPose.ThePose, hmdToEyePose, outEyePoses);\r
+\r
+ if (outSensorSampleTime != nullptr) {\r
+ *outSensorSampleTime = ovr_GetTimeInSeconds();\r
+ }\r
+}\r
+\r
// Detects whether the Oculus runtime service is running and whether an HMD is
// connected, without requiring ovr_Initialize. Detection polls the named
// event OVR_HMD_CONNECTED_EVENT_NAME published by the service.
// NOTE(review): the guard is spelled OSX_UNIMPLEMENTED, so the Win32 path is
// compiled on every platform where that macro is undefined — confirm this is
// only built for Windows targets.
OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds) {
  // Initially we assume everything is not running.
  ovrDetectResult result;
  result.IsOculusHMDConnected = ovrFalse;
  result.IsOculusServiceRunning = ovrFalse;

#if !defined(OSX_UNIMPLEMENTED)
  // Attempt to open the named event.
  HANDLE hServiceEvent = ::OpenEventW(SYNCHRONIZE, FALSE, OVR_HMD_CONNECTED_EVENT_NAME);

  // If event exists,
  if (hServiceEvent != nullptr) {
    // This indicates that the Oculus Runtime is installed and running.
    result.IsOculusServiceRunning = ovrTrue;

    // Poll for event state.
    DWORD objectResult = ::WaitForSingleObject(hServiceEvent, timeoutMilliseconds);

    // If the event is signaled,
    if (objectResult == WAIT_OBJECT_0) {
      // This indicates that the Oculus HMD is connected.
      result.IsOculusHMDConnected = ovrTrue;
    }

    ::CloseHandle(hServiceEvent);
  }
#else
  // Non-Windows stub: report everything present so single-process development
  // can proceed.
  (void)timeoutMilliseconds;
  fprintf(stderr, __FILE__ "::[%s] Not implemented. Assuming single-process.\n", __func__);
  result.IsOculusServiceRunning = ovrTrue;
  result.IsOculusHMDConnected = ovrTrue;
#endif // OSX_UNIMPLEMENTED


  return result;
}
+\r
+OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose) {\r
+ outPose->Orientation.x = -inPose->Orientation.x;\r
+ outPose->Orientation.y = inPose->Orientation.y;\r
+ outPose->Orientation.z = inPose->Orientation.z;\r
+ outPose->Orientation.w = -inPose->Orientation.w;\r
+\r
+ outPose->Position.x = -inPose->Position.x;\r
+ outPose->Position.y = inPose->Position.y;\r
+ outPose->Position.z = inPose->Position.z;\r
+}\r
+\r
// Converts a single little-endian PCM sample (8, 16 or 32 bits, pointed to by
// `data`) to a float clamped to [-1, 1]. Unsupported widths (e.g. 24-bit)
// yield 0.0f. `swapBytes` is reserved for future big-endian support.
static float wavPcmBytesToFloat(const void* data, int32_t sizeInBits, bool swapBytes) {
  // TODO Support big endian
  (void)swapBytes;

  // There's not a strong standard to convert 8/16/32b PCM to float.
  // For 16b: MSDN says range is [-32760, 32760], Python Scipy uses [-32767, 32767] and Audacity
  // outputs the full range [-32768, 32767].
  // We use the same range on both sides and clamp to [-1, 1].
  //
  // Samples are read with memcpy rather than by casting `data` to a wider
  // pointer type: the bytes come from an arbitrary offset inside a WAV buffer,
  // so a direct dereference would be a strict-aliasing violation and a
  // potentially misaligned access (undefined behavior on some targets).
  float result = 0.0f;
  if (sizeInBits == 8) {
    // uint8_t is a special case, unsigned where 128 is zero.
    uint8_t sample8;
    memcpy(&sample8, data, sizeof sample8);
    result = (sample8 / (float)UCHAR_MAX) * 2.0f - 1.0f;
  } else if (sizeInBits == 16) {
    int16_t sample16;
    memcpy(&sample16, data, sizeof sample16);
    result = sample16 / (float)SHRT_MAX;
  } else if (sizeInBits == 32) {
    int32_t sample32;
    memcpy(&sample32, data, sizeof sample32);
    result = sample32 / (float)INT_MAX;
  }

  // Only the lower bound can be exceeded (e.g. -32768/32767 for 16-bit), so
  // clamping the minimum is sufficient.
  return std::max(-1.0f, result);
}
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_GenHapticsFromAudioData(\r
+ ovrHapticsClip* outHapticsClip,\r
+ const ovrAudioChannelData* audioChannel,\r
+ ovrHapticsGenMode genMode) {\r
+ if (!outHapticsClip || !audioChannel || genMode != ovrHapticsGenMode_PointSample)\r
+ return ovrError_InvalidParameter;\r
+ // Validate audio channel\r
+ if (audioChannel->Frequency <= 0 || audioChannel->SamplesCount <= 0 ||\r
+ audioChannel->Samples == nullptr)\r
+ return ovrError_InvalidParameter;\r
+\r
+ const int32_t kHapticsFrequency = 320;\r
+ const int32_t kHapticsMaxAmplitude = 255;\r
+ float samplesPerStep = audioChannel->Frequency / (float)kHapticsFrequency;\r
+ int32_t hapticsSampleCount = (int32_t)ceil(audioChannel->SamplesCount / samplesPerStep);\r
+\r
+ uint8_t* hapticsSamples = new uint8_t[hapticsSampleCount];\r
+ for (int32_t i = 0; i < hapticsSampleCount; ++i) {\r
+ float sample = audioChannel->Samples[(int32_t)(i * samplesPerStep)];\r
+ uint8_t hapticSample =\r
+ (uint8_t)std::min(UCHAR_MAX, (int)round(fabs(sample) * kHapticsMaxAmplitude));\r
+ hapticsSamples[i] = hapticSample;\r
+ }\r
+\r
+ outHapticsClip->Samples = hapticsSamples;\r
+ outHapticsClip->SamplesCount = hapticsSampleCount;\r
+\r
+ return ovrSuccess;\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(ovrResult)\r
+ovr_ReadWavFromBuffer(\r
+ ovrAudioChannelData* outAudioChannel,\r
+ const void* inputData,\r
+ int dataSizeInBytes,\r
+ int stereoChannelToUse) {\r
+ // We don't support any format other than PCM and IEEE Float\r
+ enum WavFormats {\r
+ kWavFormatUnknown = 0x0000,\r
+ kWavFormatLPCM = 0x0001,\r
+ kWavFormatFloatIEEE = 0x0003,\r
+ kWavFormatExtensible = 0xFFFE\r
+ };\r
+\r
+ struct WavHeader {\r
+ char RiffId[4]; // "RIFF" = little-endian, "RIFX" = big-endian\r
+ int32_t Size; // 4 + (8 + FmtChunkSize) + (8 + DataChunkSize)\r
+ char WavId[4]; // Must be "WAVE"\r
+\r
+ char FmtChunckId[4]; // Must be "fmt "\r
+ uint32_t FmtChunkSize; // Remaining size of this chunk (16B)\r
+ uint16_t Format; // WavFormats: PCM or Float supported\r
+ uint16_t Channels; // 1 = Mono, 2 = Stereo\r
+ uint32_t SampleRate; // e.g. 44100\r
+ uint32_t BytesPerSec; // SampleRate * BytesPerBlock\r
+ uint16_t BytesPerBlock; // (NumChannels * BitsPerSample/8)\r
+ uint16_t BitsPerSample; // 8, 16, 32\r
+\r
+ char DataChunckId[4]; // Must be "data"\r
+ uint32_t DataChunkSize; // Remaining size of this chunk\r
+ };\r
+\r
+ const int32_t kMinWavFileSize = sizeof(WavHeader) + 1;\r
+ if (!outAudioChannel || !inputData || dataSizeInBytes < kMinWavFileSize)\r
+ return ovrError_InvalidParameter;\r
+\r
+ WavHeader* header = (WavHeader*)inputData;\r
+ uint8_t* data = (uint8_t*)inputData + sizeof(WavHeader);\r
+\r
+ // Validate\r
+ const char* wavId = header->RiffId;\r
+ // TODO We need to support RIFX when supporting big endian formats\r
+ // bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && (wavId[3] == 'F' ||\r
+ // wavId[3] == 'X')) &&\r
+ bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && wavId[3] == 'F') &&\r
+ memcmp(header->WavId, "WAVE", 4) == 0;\r
+ bool hasValidChunks =\r
+ memcmp(header->FmtChunckId, "fmt ", 4) == 0 && memcmp(header->DataChunckId, "data ", 4) == 0;\r
+ if (!isValidWav || !hasValidChunks) {\r
+ return ovrError_InvalidOperation;\r
+ }\r
+\r
+ // We only support PCM\r
+ bool isSupported = (header->Format == kWavFormatLPCM || header->Format == kWavFormatFloatIEEE) &&\r
+ (header->Channels == 1 || header->Channels == 2) &&\r
+ (header->BitsPerSample == 8 || header->BitsPerSample == 16 || header->BitsPerSample == 32);\r
+ if (!isSupported) {\r
+ return ovrError_Unsupported;\r
+ }\r
+\r
+ // Channel selection\r
+ bool useSecondChannel = (header->Channels == 2 && stereoChannelToUse == 1);\r
+ int32_t channelOffset = (useSecondChannel) ? header->BytesPerBlock / 2 : 0;\r
+\r
+ // TODO Support big-endian\r
+ int32_t blockCount = header->DataChunkSize / header->BytesPerBlock;\r
+ float* samples = new float[blockCount];\r
+\r
+ for (int32_t i = 0; i < blockCount; i++) {\r
+ int32_t dataIndex = i * header->BytesPerBlock;\r
+ uint8_t* dataPtr = &data[dataIndex + channelOffset];\r
+ float sample = (header->Format == kWavFormatLPCM)\r
+ ? wavPcmBytesToFloat(dataPtr, header->BitsPerSample, false)\r
+ : *(float*)dataPtr;\r
+\r
+ samples[i] = sample;\r
+ }\r
+\r
+ // Output\r
+ outAudioChannel->Samples = samples;\r
+ outAudioChannel->SamplesCount = blockCount;\r
+ outAudioChannel->Frequency = header->SampleRate;\r
+\r
+ return ovrSuccess;\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel) {\r
+ if (audioChannel != nullptr && audioChannel->Samples != nullptr) {\r
+ delete[] audioChannel->Samples;\r
+ memset(audioChannel, 0, sizeof(ovrAudioChannelData));\r
+ }\r
+}\r
+\r
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip) {\r
+ if (hapticsClip != nullptr && hapticsClip->Samples != nullptr) {\r
+ delete[](uint8_t*) hapticsClip->Samples;\r
+ memset(hapticsClip, 0, sizeof(ovrHapticsClip));\r
+ }\r
+}\r
--- /dev/null
+/************************************************************************************\r
+\r
+Filename : OVR_StereoProjection.cpp\r
+Content : Stereo rendering functions\r
+Created : November 30, 2013\r
+Authors : Tom Fosyth\r
+\r
+Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.\r
+\r
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");\r
+you may not use the Oculus VR Rift SDK except in compliance with the License,\r
+which is provided at the time of installation or download, or which\r
+otherwise accompanies this software in either electronic or hard copy form.\r
+\r
+You may obtain a copy of the License at\r
+\r
+http://www.oculusvr.com/licenses/LICENSE-3.3\r
+\r
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK\r
+distributed under the License is distributed on an "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+See the License for the specific language governing permissions and\r
+limitations under the License.\r
+\r
+*************************************************************************************/\r
+\r
+#include <Extras/OVR_StereoProjection.h>\r
+\r
+namespace OVR {\r
+\r
+ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort tanHalfFov) {\r
+ float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan);\r
+ float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f;\r
+ float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan);\r
+ float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f;\r
+\r
+ ScaleAndOffset2D result;\r
+ result.Scale = Vector2f(projXScale, projYScale);\r
+ result.Offset = Vector2f(projXOffset, projYOffset);\r
+ // Hey - why is that Y.Offset negated?\r
+ // It's because a projection matrix transforms from world coords with Y=up,\r
+ // whereas this is from NDC which is Y=down.\r
+\r
+ return result;\r
+}\r
+\r
// Builds an eye projection matrix from tangent half-FOV angles.
//   leftHanded    - selects handedness of the view space.
//   isOpenGL      - emit OpenGL's [-w,+w] clip range instead of D3D's [0,+w].
//   flipZ         - store reversed depth (far < near in the depth buffer).
//   farAtInfinity - push the far plane to infinity; requires flipZ, otherwise
//                   it is silently disabled below.
// The eye parameter is currently unused (no canted-display support).
Matrix4f CreateProjection(
    bool leftHanded,
    bool isOpenGL,
    FovPort tanHalfFov,
    StereoEye /*eye*/,
    float zNear /*= 0.01f*/,
    float zFar /*= 10000.0f*/,
    bool flipZ /*= false*/,
    bool farAtInfinity /*= false*/) {
  if (!flipZ && farAtInfinity) {
    // OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");
    // Assertion disabled because this code no longer has access to LibOVRKernel assertion
    // functionality.
    farAtInfinity = false;
  }

  // A projection matrix is very like a scaling from NDC, so we can start with that.
  ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov(tanHalfFov);

  // Right-handed view space looks down -Z, flipping the sign of the terms
  // that involve the view-space Z coordinate.
  float handednessScale = leftHanded ? 1.0f : -1.0f;

  Matrix4f projection;
  // Produces X result, mapping clip edges to [-w,+w]
  projection.M[0][0] = scaleAndOffset.Scale.x;
  projection.M[0][1] = 0.0f;
  projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x;
  projection.M[0][3] = 0.0f;

  // Produces Y result, mapping clip edges to [-w,+w]
  // Hey - why is that YOffset negated?
  // It's because a projection matrix transforms from world coords with Y=up,
  // whereas this is derived from an NDC scaling, which is Y=down.
  projection.M[1][0] = 0.0f;
  projection.M[1][1] = scaleAndOffset.Scale.y;
  projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y;
  projection.M[1][3] = 0.0f;

  // Produces Z-buffer result - app needs to fill this in with whatever Z range it wants.
  // We'll just use some defaults for now.
  projection.M[2][0] = 0.0f;
  projection.M[2][1] = 0.0f;

  if (farAtInfinity) {
    if (isOpenGL) {
      // It's not clear this makes sense for OpenGL - you don't get the same precision benefits you
      // do in D3D.
      projection.M[2][2] = -handednessScale;
      projection.M[2][3] = 2.0f * zNear;
    } else {
      projection.M[2][2] = 0.0f;
      projection.M[2][3] = zNear;
    }
  } else {
    if (isOpenGL) {
      // Clip range is [-w,+w], so 0 is at the middle of the range.
      projection.M[2][2] =
          -handednessScale * (flipZ ? -1.0f : 1.0f) * (zNear + zFar) / (zNear - zFar);
      projection.M[2][3] = 2.0f * ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
    } else {
      // Clip range is [0,+w], so 0 is at the start of the range.
      projection.M[2][2] = -handednessScale * (flipZ ? -zNear : zFar) / (zNear - zFar);
      projection.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
    }
  }

  // Produces W result (= Z in)
  projection.M[3][0] = 0.0f;
  projection.M[3][1] = 0.0f;
  projection.M[3][2] = handednessScale;
  projection.M[3][3] = 0.0f;

  return projection;
}
+\r
+Matrix4f CreateOrthoSubProjection(\r
+ bool /*rightHanded*/,\r
+ StereoEye eyeType,\r
+ float tanHalfFovX,\r
+ float tanHalfFovY,\r
+ float unitsX,\r
+ float unitsY,\r
+ float distanceFromCamera,\r
+ float interpupillaryDistance,\r
+ Matrix4f const& projection,\r
+ float zNear /*= 0.0f*/,\r
+ float zFar /*= 0.0f*/,\r
+ bool flipZ /*= false*/,\r
+ bool farAtInfinity /*= false*/) {\r
+ if (!flipZ && farAtInfinity) {\r
+ // OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");\r
+ // Assertion disabled because this code no longer has access to LibOVRKernel assertion\r
+ // functionality.\r
+ farAtInfinity = false;\r
+ }\r
+\r
+ float orthoHorizontalOffset = interpupillaryDistance * 0.5f / distanceFromCamera;\r
+ switch (eyeType) {\r
+ case StereoEye_Left:\r
+ break;\r
+ case StereoEye_Right:\r
+ orthoHorizontalOffset = -orthoHorizontalOffset;\r
+ break;\r
+ case StereoEye_Center:\r
+ orthoHorizontalOffset = 0.0f;\r
+ break;\r
+ default:\r
+ break;\r
+ }\r
+\r
+ // Current projection maps real-world vector (x,y,1) to the RT.\r
+ // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to\r
+ // the physical [-orthoHalfFov,orthoHalfFov]\r
+ // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means\r
+ // we don't have to feed in Z=1 all the time.\r
+ // The horizontal offset math is a little hinky because the destination is\r
+ // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]\r
+ // So we need to first map [-FovPixels/2,FovPixels/2] to\r
+ // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:\r
+ // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;\r
+ // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;\r
+ // But then we need the sam mapping as the existing projection matrix, i.e.\r
+ // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];\r
+ // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] +\r
+ // Projection.M[0][2];\r
+ // = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +\r
+ // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];\r
+ // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels\r
+ // and\r
+ // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].\r
+\r
+ float orthoScaleX = 2.0f * tanHalfFovX / unitsX;\r
+ float orthoScaleY = 2.0f * tanHalfFovY / unitsY;\r
+ Matrix4f ortho;\r
+ ortho.M[0][0] = projection.M[0][0] * orthoScaleX;\r
+ ortho.M[0][1] = 0.0f;\r
+ ortho.M[0][2] = 0.0f;\r
+ ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);\r
+\r
+ ortho.M[1][0] = 0.0f;\r
+ ortho.M[1][1] = -projection.M[1][1] * orthoScaleY; // Note sign flip (text rendering uses Y=down).\r
+ ortho.M[1][2] = 0.0f;\r
+ ortho.M[1][3] = -projection.M[1][2];\r
+\r
+ const float zDiff = zNear - zFar;\r
+ if (fabsf(zDiff) < 0.001f) {\r
+ ortho.M[2][0] = 0.0f;\r
+ ortho.M[2][1] = 0.0f;\r
+ ortho.M[2][2] = 0.0f;\r
+ ortho.M[2][3] = flipZ ? zNear : zFar;\r
+ } else {\r
+ ortho.M[2][0] = 0.0f;\r
+ ortho.M[2][1] = 0.0f;\r
+\r
+ if (farAtInfinity) {\r
+ ortho.M[2][2] = 0.0f;\r
+ ortho.M[2][3] = zNear;\r
+ } else if (zDiff != 0.0f) {\r
+ ortho.M[2][2] = (flipZ ? zNear : zFar) / zDiff;\r
+ ortho.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / zDiff;\r
+ }\r
+ }\r
+\r
+ // No perspective correction for ortho.\r
+ ortho.M[3][0] = 0.0f;\r
+ ortho.M[3][1] = 0.0f;\r
+ ortho.M[3][2] = 0.0f;\r
+ ortho.M[3][3] = 1.0f;\r
+\r
+ return ortho;\r
+}\r
+\r
+} // namespace OVR\r