From: Andreas Eversberg Date: Fri, 11 May 2018 11:12:47 +0000 (+0200) Subject: Version 1.24 X-Git-Url: http://git.eversberg.eu/gitweb.cgi?p=libovr-mingw-w64-jolly.git;a=commitdiff_plain;h=d27d01edcbc6f98a8bd97954156726b0f0825ff6 Version 1.24 --- d27d01edcbc6f98a8bd97954156726b0f0825ff6 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..774350a --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +libovr.dll diff --git a/Include/Extras/OVR_CAPI_Util.h b/Include/Extras/OVR_CAPI_Util.h new file mode 100755 index 0000000..cdf7d33 --- /dev/null +++ b/Include/Extras/OVR_CAPI_Util.h @@ -0,0 +1,283 @@ +/********************************************************************************/ /** + \file OVR_CAPI_Util.h + \brief This header provides LibOVR utility function declarations + \copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved. + *************************************************************************************/ + +#ifndef OVR_CAPI_Util_h +#define OVR_CAPI_Util_h + +#include "OVR_CAPI.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/// Enumerates modifications to the projection matrix based on the application's needs. +/// +/// \see ovrMatrix4f_Projection +/// +typedef enum ovrProjectionModifier_ { + /// Use for generating a default projection matrix that is: + /// * Right-handed. + /// * Near depth values stored in the depth buffer are smaller than far depth values. + /// * Both near and far are explicitly defined. + /// * With a clipping range that is (0 to w). + ovrProjection_None = 0x00, + + /// Enable if using left-handed transformations in your application. + ovrProjection_LeftHanded = 0x01, + + /// After the projection transform is applied, far values stored in the depth buffer will be less + /// than closer depth values. + /// NOTE: Enable only if the application is using a floating-point depth buffer for proper + /// precision. + ovrProjection_FarLessThanNear = 0x02, + + /// When this flag is used, the zfar value pushed into ovrMatrix4f_Projection() will be ignored + /// NOTE: Enable only if ovrProjection_FarLessThanNear is also enabled where the far clipping + /// plane will be pushed to infinity. + ovrProjection_FarClipAtInfinity = 0x04, + + /// Enable if the application is rendering with OpenGL and expects a projection matrix with a + /// clipping range of (-w to w). + /// Ignore this flag if your application already handles the conversion from D3D range (0 to w) to + /// OpenGL. + ovrProjection_ClipRangeOpenGL = 0x08, +} ovrProjectionModifier; + +/// Return values for ovr_Detect. +/// +/// \see ovr_Detect +/// +typedef struct OVR_ALIGNAS(8) ovrDetectResult_ { + /// Is ovrFalse when the Oculus Service is not running. + /// This means that the Oculus Service is either uninstalled or stopped. + /// IsOculusHMDConnected will be ovrFalse in this case. + /// Is ovrTrue when the Oculus Service is running. + /// This means that the Oculus Service is installed and running. + /// IsOculusHMDConnected will reflect the state of the HMD. + ovrBool IsOculusServiceRunning; + + /// Is ovrFalse when an Oculus HMD is not detected. + /// If the Oculus Service is not running, this will be ovrFalse. + /// Is ovrTrue when an Oculus HMD is detected. + /// This implies that the Oculus Service is also installed and running. 
+ ovrBool IsOculusHMDConnected; + + OVR_UNUSED_STRUCT_PAD(pad0, 6) ///< \internal struct padding + +} ovrDetectResult; + +OVR_STATIC_ASSERT(sizeof(ovrDetectResult) == 8, "ovrDetectResult size mismatch"); + +/// Modes used to generate Touch Haptics from audio PCM buffer. +/// +typedef enum ovrHapticsGenMode_ { + /// Point sample original signal at Haptics frequency + ovrHapticsGenMode_PointSample, + ovrHapticsGenMode_Count +} ovrHapticsGenMode; + +/// Store audio PCM data (as 32b float samples) for an audio channel. +/// Note: needs to be released with ovr_ReleaseAudioChannelData to avoid memory leak. +/// +typedef struct ovrAudioChannelData_ { + /// Samples stored as floats [-1.0f, 1.0f]. + const float* Samples; + + /// Number of samples + int SamplesCount; + + /// Frequency (e.g. 44100) + int Frequency; +} ovrAudioChannelData; + +/// Store a full Haptics clip, which can be used as data source for multiple ovrHapticsBuffers. +/// +typedef struct ovrHapticsClip_ { + /// Samples stored in opaque format + const void* Samples; + + /// Number of samples + int SamplesCount; +} ovrHapticsClip; + +/// Detects Oculus Runtime and Device Status +/// +/// Checks for Oculus Runtime and Oculus HMD device status without loading the LibOVRRT +/// shared library. This may be called before ovr_Initialize() to help decide whether or +/// not to initialize LibOVR. +/// +/// \param[in] timeoutMilliseconds Specifies a timeout to wait for HMD to be attached or 0 to poll. +/// +/// \return Returns an ovrDetectResult object indicating the result of detection. +/// +/// \see ovrDetectResult +/// +OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds); + +// On the Windows platform, +#ifdef _WIN32 +/// This is the Windows Named Event name that is used to check for HMD connected state. +#define OVR_HMD_CONNECTED_EVENT_NAME L"OculusHMDConnected" +#endif // _WIN32 + +/// Used to generate projection from ovrEyeDesc::Fov. +/// +/// \param[in] fov Specifies the ovrFovPort to use. +/// \param[in] znear Distance to near Z limit. +/// \param[in] zfar Distance to far Z limit. +/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags. +/// +/// \return Returns the calculated projection matrix. +/// +/// \see ovrProjectionModifier +/// +OVR_PUBLIC_FUNCTION(ovrMatrix4f) +ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags); + +/// Extracts the required data from the result of ovrMatrix4f_Projection. +/// +/// \param[in] projection Specifies the project matrix from which to +/// extract ovrTimewarpProjectionDesc. +/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags. +/// \return Returns the extracted ovrTimewarpProjectionDesc. +/// \see ovrTimewarpProjectionDesc +/// +OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc) +ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f projection, unsigned int projectionModFlags); + +/// Generates an orthographic sub-projection. +/// +/// Used for 2D rendering, Y is down. +/// +/// \param[in] projection The perspective matrix that the orthographic matrix is derived from. +/// \param[in] orthoScale Equal to 1.0f / pixelsPerTanAngleAtCenter. +/// \param[in] orthoDistance Equal to the distance from the camera in meters, such as 0.8m. +/// \param[in] HmdToEyeOffsetX Specifies the offset of the eye from the center. +/// +/// \return Returns the calculated projection matrix. 
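//--------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of OVR_CAPI_Util.h).
// It shows a minimal way to combine ovr_Detect() and ovrMatrix4f_Projection()
// declared above. The ovrFovPort value is assumed to come from elsewhere
// (for example the eye render descriptions in OVR_CAPI.h); the function names
// and the 0.1/1000 clip distances are made up for the example.
static bool ExampleOculusHmdPresent()
{
    // Poll once; pass a timeout in milliseconds to wait for an HMD instead.
    ovrDetectResult detect = ovr_Detect(0);
    return detect.IsOculusServiceRunning && detect.IsOculusHMDConnected;
}

static ovrMatrix4f ExampleEyeProjection(ovrFovPort fov)
{
    // Defaults: right-handed, near < far, D3D-style (0..w) clip range.
    // OR in ovrProjection_ClipRangeOpenGL when rendering with OpenGL.
    return ovrMatrix4f_Projection(fov, 0.1f, 1000.0f, ovrProjection_None);
}
//--------------------------------------------------------------------------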
+/// +OVR_PUBLIC_FUNCTION(ovrMatrix4f) +ovrMatrix4f_OrthoSubProjection( + ovrMatrix4f projection, + ovrVector2f orthoScale, + float orthoDistance, + float HmdToEyeOffsetX); + +/// Computes offset eye poses based on headPose returned by ovrTrackingState. +/// +/// \param[in] headPose Indicates the HMD position and orientation to use for the calculation. +/// \param[in] hmdToEyePose Can be ovrEyeRenderDesc.HmdToEyePose returned from +/// ovr_GetRenderDesc. For monoscopic rendering, use a position vector that is average +/// of the two position vectors for each eyes. +/// \param[out] outEyePoses If outEyePoses are used for rendering, they should be passed to +/// ovr_SubmitFrame in ovrLayerEyeFov::RenderPose or ovrLayerEyeFovDepth::RenderPose. +/// +#undef ovr_CalcEyePoses +OVR_PUBLIC_FUNCTION(void) +ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]); +OVR_PRIVATE_FUNCTION(void) +ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef HmdToEyePose[2], ovrPosef outEyePoses[2]); +#define ovr_CalcEyePoses ovr_CalcEyePoses2 + +/// Returns the predicted head pose in outHmdTrackingState and offset eye poses in outEyePoses. +/// +/// This is a thread-safe function where caller should increment frameIndex with every frame +/// and pass that index where applicable to functions called on the rendering thread. +/// Assuming outEyePoses are used for rendering, it should be passed as a part of ovrLayerEyeFov. +/// The caller does not need to worry about applying HmdToEyePose to the returned outEyePoses +/// variables. +/// +/// \param[in] hmd Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] frameIndex Specifies the targeted frame index, or 0 to refer to one frame after +/// the last time ovr_SubmitFrame was called. +/// \param[in] latencyMarker Specifies that this call is the point in time where +/// the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer +/// provides "SensorSampleTimestamp", that will override the value stored here. +/// \param[in] hmdToEyePose Can be ovrEyeRenderDesc.HmdToEyePose returned from +/// ovr_GetRenderDesc. For monoscopic rendering, use a position vector that is average +/// of the two position vectors for each eyes. +/// \param[out] outEyePoses The predicted eye poses. +/// \param[out] outSensorSampleTime The time when this function was called. May be NULL, in which +/// case it is ignored. +/// +#undef ovr_GetEyePoses +OVR_PUBLIC_FUNCTION(void) +ovr_GetEyePoses( + ovrSession session, + long long frameIndex, + ovrBool latencyMarker, + const ovrVector3f hmdToEyeOffset[2], + ovrPosef outEyePoses[2], + double* outSensorSampleTime); +OVR_PRIVATE_FUNCTION(void) +ovr_GetEyePoses2( + ovrSession session, + long long frameIndex, + ovrBool latencyMarker, + const ovrPosef HmdToEyePose[2], + ovrPosef outEyePoses[2], + double* outSensorSampleTime); +#define ovr_GetEyePoses ovr_GetEyePoses2 + +/// Tracking poses provided by the SDK come in a right-handed coordinate system. If an application +/// is passing in ovrProjection_LeftHanded into ovrMatrix4f_Projection, then it should also use +/// this function to flip the HMD tracking poses to be left-handed. +/// +/// While this utility function is intended to convert a left-handed ovrPosef into a right-handed +/// coordinate system, it will also work for converting right-handed to left-handed since the +/// flip operation is the same for both cases. 
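//--------------------------------------------------------------------------
// Illustrative per-frame sketch (editor's addition, not part of this header).
// session, frameIndex and the two ovrEyeRenderDesc values are assumed to have
// been obtained through the core OVR_CAPI.h API (ovr_Create, ovr_GetRenderDesc);
// ovrTrue also comes from OVR_CAPI.h. Only ovr_GetEyePoses() is taken from the
// declarations above; note the macro remaps it to ovr_GetEyePoses2, which takes
// an ovrPosef HmdToEyePose[2] array.
static void ExampleQueryEyePoses(
    ovrSession session,
    long long frameIndex,
    const ovrEyeRenderDesc eyeRenderDesc[2],
    ovrPosef outEyePoses[2],
    double* outSensorSampleTime)
{
    ovrPosef hmdToEyePose[2] = {
        eyeRenderDesc[0].HmdToEyePose, eyeRenderDesc[1].HmdToEyePose};
    // Passing ovrTrue as latencyMarker marks this call as the start of the
    // "App-to-Mid-Photon" latency timer for the frame.
    ovr_GetEyePoses(
        session, frameIndex, ovrTrue, hmdToEyePose, outEyePoses, outSensorSampleTime);
    // outEyePoses would then be copied into ovrLayerEyeFov::RenderPose before
    // submitting the frame.
}
//--------------------------------------------------------------------------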
+/// +/// \param[in] inPose that is right-handed +/// \param[out] outPose that is requested to be left-handed (can be the same pointer to inPose) +/// +OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose); + +/// Reads an audio channel from Wav (Waveform Audio File) data. +/// Input must be a byte buffer representing a valid Wav file. Audio samples from the specified +/// channel are read, +/// converted to float [-1.0f, 1.0f] and returned through ovrAudioChannelData. +/// +/// Supported formats: PCM 8b, 16b, 32b and IEEE float (little-endian only). +/// +/// \param[out] outAudioChannel output audio channel data. +/// \param[in] inputData a binary buffer representing a valid Wav file data. +/// \param[in] dataSizeInBytes size of the buffer in bytes. +/// \param[in] stereoChannelToUse audio channel index to extract (0 for mono). +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_ReadWavFromBuffer( + ovrAudioChannelData* outAudioChannel, + const void* inputData, + int dataSizeInBytes, + int stereoChannelToUse); + +/// Generates playable Touch Haptics data from an audio channel. +/// +/// \param[out] outHapticsClip generated Haptics clip. +/// \param[in] audioChannel input audio channel data. +/// \param[in] genMode mode used to convert and audio channel data to Haptics data. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GenHapticsFromAudioData( + ovrHapticsClip* outHapticsClip, + const ovrAudioChannelData* audioChannel, + ovrHapticsGenMode genMode); + +/// Releases memory allocated for ovrAudioChannelData. Must be called to avoid memory leak. +/// \param[in] audioChannel pointer to an audio channel +/// +OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel); + +/// Releases memory allocated for ovrHapticsClip. Must be called to avoid memory leak. +/// \param[in] hapticsClip pointer to a haptics clip +/// +OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif // Header include guard diff --git a/Include/Extras/OVR_Math.h b/Include/Extras/OVR_Math.h new file mode 100755 index 0000000..b784316 --- /dev/null +++ b/Include/Extras/OVR_Math.h @@ -0,0 +1,4332 @@ +/********************************************************************************/ /** + \file OVR_Math.h + \brief Implementation of 3D primitives such as vectors, matrices. + \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + *************************************************************************************/ + +#ifndef OVR_Math_h +#define OVR_Math_h + +// This file is intended to be independent of the rest of LibOVR and LibOVRKernel and thus +// has no #include dependencies on either. + +#include +#include +#include +#include +#include +#include + +#ifndef OVR_EXCLUDE_CAPI_FROM_MATH +#include "../OVR_CAPI.h" // Required due to a dependence on the ovrFovPort_ declaration. +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4127) // conditional expression is constant + +#if _MSC_VER < 1800 // isfinite was introduced in VS2013 +#define isfinite(x) _finite((x)) +#endif +#endif + +#if defined(_MSC_VER) +#define OVRMath_sprintf sprintf_s +#else +#define OVRMath_sprintf snprintf +#endif + +//------------------------------------------------------------------------------------- +// ***** OVR_MATH_ASSERT +// +// Independent debug break implementation for OVR_Math.h. 
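//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) for the Wav/haptics utilities
// declared in OVR_CAPI_Util.h above: read one channel of a Wav buffer,
// convert it to a Touch haptics clip, and release the intermediate data.
// OVR_SUCCESS() is assumed from the core error-code header; the function
// name is made up for the example.
static ovrResult ExampleWavToHapticsClip(
    const void* wavData, int wavSizeInBytes, ovrHapticsClip* outClip)
{
    ovrAudioChannelData channel;
    ovrResult result =
        ovr_ReadWavFromBuffer(&channel, wavData, wavSizeInBytes, 0 /* mono/left */);
    if (OVR_SUCCESS(result)) {
        result = ovr_GenHapticsFromAudioData(outClip, &channel, ovrHapticsGenMode_PointSample);
        // The intermediate channel data must always be released to avoid a leak;
        // the caller later releases *outClip with ovr_ReleaseHapticsClip().
        ovr_ReleaseAudioChannelData(&channel);
    }
    return result;
}
//--------------------------------------------------------------------------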
+ +#if !defined(OVR_MATH_DEBUG_BREAK) +#if defined(_DEBUG) +#if defined(_MSC_VER) +#define OVR_MATH_DEBUG_BREAK __debugbreak() +#else +#define OVR_MATH_DEBUG_BREAK __builtin_trap() +#endif +#else +#define OVR_MATH_DEBUG_BREAK ((void)0) +#endif +#endif + +//------------------------------------------------------------------------------------- +// ***** OVR_MATH_ASSERT +// +// Independent OVR_MATH_ASSERT implementation for OVR_Math.h. + +#if !defined(OVR_MATH_ASSERT) +#if defined(_DEBUG) +#define OVR_MATH_ASSERT(p) \ + if (!(p)) { \ + OVR_MATH_DEBUG_BREAK; \ + } +#else +#define OVR_MATH_ASSERT(p) ((void)0) +#endif +#endif + +//------------------------------------------------------------------------------------- +// ***** OVR_MATH_STATIC_ASSERT +// +// Independent OVR_MATH_ASSERT implementation for OVR_Math.h. + +#if !defined(OVR_MATH_STATIC_ASSERT) +#if defined(__cplusplus) && \ + ((defined(_MSC_VER) && (defined(_MSC_VER) >= 1600)) || defined(__GXX_EXPERIMENTAL_CXX0X__) || \ + (__cplusplus >= 201103L)) +#define OVR_MATH_STATIC_ASSERT static_assert +#else +#if !defined(OVR_SA_UNUSED) +#if defined(__GNUC__) || defined(__clang__) +#define OVR_SA_UNUSED __attribute__((unused)) +#else +#define OVR_SA_UNUSED +#endif +#define OVR_SA_PASTE(a, b) a##b +#define OVR_SA_HELP(a, b) OVR_SA_PASTE(a, b) +#endif + +#define OVR_MATH_STATIC_ASSERT(expression, msg) \ + typedef char OVR_SA_HELP(compileTimeAssert, __LINE__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED +#endif +#endif + +namespace OVR { + +template +const T OVRMath_Min(const T a, const T b) { + return (a < b) ? a : b; +} + +template +const T OVRMath_Max(const T a, const T b) { + return (b < a) ? a : b; +} + +template +void OVRMath_Swap(T& a, T& b) { + T temp(a); + a = b; + b = temp; +} + +//------------------------------------------------------------------------------------- +// ***** Constants for 3D world/axis definitions. + +// Definitions of axes for coordinate and rotation conversions. +enum Axis { Axis_X = 0, Axis_Y = 1, Axis_Z = 2 }; + +// RotateDirection describes the rotation direction around an axis, interpreted as follows: +// CW - Clockwise while looking "down" from positive axis towards the origin. +// CCW - Counter-clockwise while looking from the positive axis towards the origin, +// which is in the negative axis direction. +// CCW is the default for the RHS coordinate system. Oculus standard RHS coordinate +// system defines Y up, X right, and Z back (pointing out from the screen). In this +// system Rotate_CCW around Z will specifies counter-clockwise rotation in XY plane. +enum RotateDirection { Rotate_CCW = 1, Rotate_CW = -1 }; + +// Constants for right handed and left handed coordinate systems +enum HandedSystem { Handed_R = 1, Handed_L = -1 }; + +// AxisDirection describes which way the coordinate axis points. Used by WorldAxes. +enum AxisDirection { + Axis_Up = 2, + Axis_Down = -2, + Axis_Right = 1, + Axis_Left = -1, + Axis_In = 3, + Axis_Out = -3 +}; + +struct WorldAxes { + AxisDirection XAxis, YAxis, ZAxis; + + WorldAxes(AxisDirection x, AxisDirection y, AxisDirection z) : XAxis(x), YAxis(y), ZAxis(z) { + OVR_MATH_ASSERT(abs(x) != abs(y) && abs(y) != abs(z) && abs(z) != abs(x)); + } +}; + +} // namespace OVR + +//------------------------------------------------------------------------------------// +// ***** C Compatibility Types + +// These declarations are used to support conversion between C types used in +// LibOVR C interfaces and their C++ versions. 
As an example, they allow passing +// Vector3f into a function that expects ovrVector3f. + +typedef struct ovrQuatf_ ovrQuatf; +typedef struct ovrQuatd_ ovrQuatd; +typedef struct ovrSizei_ ovrSizei; +typedef struct ovrSizef_ ovrSizef; +typedef struct ovrSized_ ovrSized; +typedef struct ovrRecti_ ovrRecti; +typedef struct ovrVector2i_ ovrVector2i; +typedef struct ovrVector2f_ ovrVector2f; +typedef struct ovrVector2d_ ovrVector2d; +typedef struct ovrVector3f_ ovrVector3f; +typedef struct ovrVector3d_ ovrVector3d; +typedef struct ovrVector4f_ ovrVector4f; +typedef struct ovrVector4d_ ovrVector4d; +typedef struct ovrMatrix2f_ ovrMatrix2f; +typedef struct ovrMatrix2d_ ovrMatrix2d; +typedef struct ovrMatrix3f_ ovrMatrix3f; +typedef struct ovrMatrix3d_ ovrMatrix3d; +typedef struct ovrMatrix4f_ ovrMatrix4f; +typedef struct ovrMatrix4d_ ovrMatrix4d; +typedef struct ovrPosef_ ovrPosef; +typedef struct ovrPosed_ ovrPosed; +typedef struct ovrPoseStatef_ ovrPoseStatef; +typedef struct ovrPoseStated_ ovrPoseStated; +typedef struct ovrFovPort_ ovrFovPort; + +namespace OVR { + +// Forward-declare our templates. +template +class Quat; +template +class Size; +template +class Rect; +template +class Vector2; +template +class Vector3; +template +class Vector4; +template +class Matrix2; +template +class Matrix3; +template +class Matrix4; +template +class Pose; +template +class PoseState; +struct FovPort; + +// CompatibleTypes::Type is used to lookup a compatible C-version of a C++ class. +template +struct CompatibleTypes { + // Declaration here seems necessary for MSVC; specializations are + // used instead. + typedef struct { + } Type; +}; + +// Specializations providing CompatibleTypes::Type value. +template <> +struct CompatibleTypes> { + typedef ovrQuatf Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrQuatd Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix2f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix2d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix3f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix3d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix4f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrMatrix4d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrSizei Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrSizef Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrSized Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrRecti Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector2i Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector2f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector2d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector3f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector3d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector4f Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrVector4d Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrPosef Type; +}; +template <> +struct CompatibleTypes> { + typedef ovrPosed Type; +}; +template <> +struct CompatibleTypes { + typedef ovrFovPort Type; +}; + +//------------------------------------------------------------------------------------// +// ***** Math +// +// Math class contains constants and functions. This class is a template specialized +// per type, with Math and Math being distinct. 
+template +class Math { + public: + // By default, support explicit conversion to float. This allows Vector2 to + // compile, for example. + typedef float OtherFloatType; + + static int Tolerance() { + return 0; + } // Default value so integer types compile +}; + +//------------------------------------------------------------------------------------// +// ***** double constants +#define MATH_DOUBLE_PI 3.14159265358979323846 +#define MATH_DOUBLE_TWOPI (2 * MATH_DOUBLE_PI) +#define MATH_DOUBLE_PIOVER2 (0.5 * MATH_DOUBLE_PI) +#define MATH_DOUBLE_PIOVER4 (0.25 * MATH_DOUBLE_PI) +#define MATH_FLOAT_MAXVALUE (FLT_MAX) + +#define MATH_DOUBLE_RADTODEGREEFACTOR (360.0 / MATH_DOUBLE_TWOPI) +#define MATH_DOUBLE_DEGREETORADFACTOR (MATH_DOUBLE_TWOPI / 360.0) + +#define MATH_DOUBLE_E 2.71828182845904523536 +#define MATH_DOUBLE_LOG2E 1.44269504088896340736 +#define MATH_DOUBLE_LOG10E 0.434294481903251827651 +#define MATH_DOUBLE_LN2 0.693147180559945309417 +#define MATH_DOUBLE_LN10 2.30258509299404568402 + +#define MATH_DOUBLE_SQRT2 1.41421356237309504880 +#define MATH_DOUBLE_SQRT1_2 0.707106781186547524401 + +#define MATH_DOUBLE_TOLERANCE \ + 1e-12 // a default number for value equality tolerance: about 4500*Epsilon; +#define MATH_DOUBLE_SINGULARITYRADIUS \ + 1e-12 // about 1-cos(.0001 degree), for gimbal lock numerical problems + +#define MATH_DOUBLE_HUGENUMBER 1.3407807929942596e+154 +#define MATH_DOUBLE_SMALLESTNONDENORMAL 2.2250738585072014e-308 + +//------------------------------------------------------------------------------------// +// ***** float constants +#define MATH_FLOAT_PI float(MATH_DOUBLE_PI) +#define MATH_FLOAT_TWOPI float(MATH_DOUBLE_TWOPI) +#define MATH_FLOAT_PIOVER2 float(MATH_DOUBLE_PIOVER2) +#define MATH_FLOAT_PIOVER4 float(MATH_DOUBLE_PIOVER4) + +#define MATH_FLOAT_RADTODEGREEFACTOR float(MATH_DOUBLE_RADTODEGREEFACTOR) +#define MATH_FLOAT_DEGREETORADFACTOR float(MATH_DOUBLE_DEGREETORADFACTOR) + +#define MATH_FLOAT_E float(MATH_DOUBLE_E) +#define MATH_FLOAT_LOG2E float(MATH_DOUBLE_LOG2E) +#define MATH_FLOAT_LOG10E float(MATH_DOUBLE_LOG10E) +#define MATH_FLOAT_LN2 float(MATH_DOUBLE_LN2) +#define MATH_FLOAT_LN10 float(MATH_DOUBLE_LN10) + +#define MATH_FLOAT_SQRT2 float(MATH_DOUBLE_SQRT2) +#define MATH_FLOAT_SQRT1_2 float(MATH_DOUBLE_SQRT1_2) + +#define MATH_FLOAT_TOLERANCE \ + 1e-5f // a default number for value equality tolerance: 1e-5, about 84*EPSILON; +#define MATH_FLOAT_SINGULARITYRADIUS \ + 1e-7f // about 1-cos(.025 degree), for gimbal lock numerical problems + +#define MATH_FLOAT_HUGENUMBER 1.8446742974197924e+019f +#define MATH_FLOAT_SMALLESTNONDENORMAL 1.1754943508222875e-038f + +// Single-precision Math constants class. 
+template <> +class Math { + public: + typedef double OtherFloatType; + + static inline float MaxValue() { + return FLT_MAX; + }; + static inline float Tolerance() { + return MATH_FLOAT_TOLERANCE; + }; // a default number for value equality tolerance + static inline float SingularityRadius() { + return MATH_FLOAT_SINGULARITYRADIUS; + }; // for gimbal lock numerical problems + static inline float HugeNumber() { + return MATH_FLOAT_HUGENUMBER; + } + static inline float SmallestNonDenormal() { + return MATH_FLOAT_SMALLESTNONDENORMAL; + } +}; + +// Double-precision Math constants class +template <> +class Math { + public: + typedef float OtherFloatType; + + static inline double Tolerance() { + return MATH_DOUBLE_TOLERANCE; + }; // a default number for value equality tolerance + static inline double SingularityRadius() { + return MATH_DOUBLE_SINGULARITYRADIUS; + }; // for gimbal lock numerical problems + static inline double HugeNumber() { + return MATH_DOUBLE_HUGENUMBER; + } + static inline double SmallestNonDenormal() { + return MATH_DOUBLE_SMALLESTNONDENORMAL; + } +}; + +typedef Math Mathf; +typedef Math Mathd; + +// Conversion functions between degrees and radians +// (non-templated to ensure passing int arguments causes warning) +inline float RadToDegree(float rad) { + return rad * MATH_FLOAT_RADTODEGREEFACTOR; +} +inline double RadToDegree(double rad) { + return rad * MATH_DOUBLE_RADTODEGREEFACTOR; +} + +inline float DegreeToRad(float deg) { + return deg * MATH_FLOAT_DEGREETORADFACTOR; +} +inline double DegreeToRad(double deg) { + return deg * MATH_DOUBLE_DEGREETORADFACTOR; +} + +// Square function +template +inline T Sqr(T x) { + return x * x; +} + +// MERGE_MOBILE_SDK +// Safe reciprocal square root. +template +T RcpSqrt(const T f) { + return (f >= Math::SmallestNonDenormal()) ? static_cast(1.0 / sqrt(f)) + : Math::HugeNumber(); +} +// MERGE_MOBILE_SDK + +// Sign: returns 0 if x == 0, -1 if x < 0, and 1 if x > 0 +template +inline T Sign(T x) { + return (x != T(0)) ? (x < T(0) ? T(-1) : T(1)) : T(0); +} + +// Numerically stable acos function +inline float Acos(float x) { + return (x > 1.0f) ? 0.0f : (x < -1.0f) ? MATH_FLOAT_PI : acosf(x); +} +inline double Acos(double x) { + return (x > 1.0) ? 0.0 : (x < -1.0) ? MATH_DOUBLE_PI : acos(x); +} + +// Numerically stable asin function +inline float Asin(float x) { + return (x > 1.0f) ? MATH_FLOAT_PIOVER2 : (x < -1.0f) ? -MATH_FLOAT_PIOVER2 : asinf(x); +} +inline double Asin(double x) { + return (x > 1.0) ? MATH_DOUBLE_PIOVER2 : (x < -1.0) ? -MATH_DOUBLE_PIOVER2 : asin(x); +} + +template +class Quat; + +//------------------------------------------------------------------------------------- +// ***** Vector2<> + +// Vector2f (Vector2d) represents a 2-dimensional vector or point in space, +// consisting of coordinates x and y + +template +class Vector2 { + public: + typedef T ElementType; + static const size_t ElementCount = 2; + + T x, y; + + Vector2() : x(0), y(0) {} + Vector2(T x_, T y_) : x(x_), y(y_) {} + explicit Vector2(T s) : x(s), y(s) {} + explicit Vector2(const Vector2::OtherFloatType>& src) + : x((T)src.x), y((T)src.y) {} + + static Vector2 Zero() { + return Vector2(0, 0); + } + + // C-interop support. 
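//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) of the C-interop described in the
// "C Compatibility Types" section above: each C++ math type converts
// implicitly to and from its layout-compatible ovr* C struct. Vector2f is
// the float typedef that appears later in this header; the function name is
// made up for the example.
static ovrVector2f ExampleRoundTrip()
{
    OVR::Vector2f uv(0.25f, 0.75f);
    ovrVector2f asC = uv;        // C++ -> C via the conversion operator below
    OVR::Vector2f back(asC);     // C -> C++ via the compatible-type constructor
    return back * 2.0f;          // C++ operators, implicitly converted back to C
}
//--------------------------------------------------------------------------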
+ typedef typename CompatibleTypes>::Type CompatibleType; + + Vector2(const CompatibleType& s) : x(s.x), y(s.y) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT( + sizeof(Vector2) == sizeof(CompatibleType), "sizeof(Vector2) failure"); + return reinterpret_cast(*this); + } + + bool operator==(const Vector2& b) const { + return x == b.x && y == b.y; + } + bool operator!=(const Vector2& b) const { + return x != b.x || y != b.y; + } + + Vector2 operator+(const Vector2& b) const { + return Vector2(x + b.x, y + b.y); + } + Vector2& operator+=(const Vector2& b) { + x += b.x; + y += b.y; + return *this; + } + Vector2 operator-(const Vector2& b) const { + return Vector2(x - b.x, y - b.y); + } + Vector2& operator-=(const Vector2& b) { + x -= b.x; + y -= b.y; + return *this; + } + Vector2 operator-() const { + return Vector2(-x, -y); + } + + // Scalar multiplication/division scales vector. + Vector2 operator*(T s) const { + return Vector2(x * s, y * s); + } + Vector2& operator*=(T s) { + x *= s; + y *= s; + return *this; + } + + Vector2 operator/(T s) const { + T rcp = T(1) / s; + return Vector2(x * rcp, y * rcp); + } + Vector2& operator/=(T s) { + T rcp = T(1) / s; + x *= rcp; + y *= rcp; + return *this; + } + + static Vector2 Min(const Vector2& a, const Vector2& b) { + return Vector2((a.x < b.x) ? a.x : b.x, (a.y < b.y) ? a.y : b.y); + } + static Vector2 Max(const Vector2& a, const Vector2& b) { + return Vector2((a.x > b.x) ? a.x : b.x, (a.y > b.y) ? a.y : b.y); + } + + Vector2 Clamped(T maxMag) const { + T magSquared = LengthSq(); + if (magSquared <= Sqr(maxMag)) + return *this; + else + return *this * (maxMag / sqrt(magSquared)); + } + + // Compare two vectors for equality with tolerance. Returns true if vectors match within + // tolerance. + bool IsEqual(const Vector2& b, T tolerance = Math::Tolerance()) const { + return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance); + } + bool Compare(const Vector2& b, T tolerance = Math::Tolerance()) const { + return IsEqual(b, tolerance); + } + + // Access element by index + T& operator[](int idx) { + OVR_MATH_ASSERT(0 <= idx && idx < 2); + return *(&x + idx); + } + const T& operator[](int idx) const { + OVR_MATH_ASSERT(0 <= idx && idx < 2); + return *(&x + idx); + } + + // Entry-wise product of two vectors + Vector2 EntrywiseMultiply(const Vector2& b) const { + return Vector2(x * b.x, y * b.y); + } + + // Multiply and divide operators do entry-wise math. Used Dot() for dot product. + Vector2 operator*(const Vector2& b) const { + return Vector2(x * b.x, y * b.y); + } + Vector2 operator/(const Vector2& b) const { + return Vector2(x / b.x, y / b.y); + } + + // Dot product + // Used to calculate angle q between two vectors among other things, + // as (A dot B) = |a||b|cos(q). + T Dot(const Vector2& b) const { + return x * b.x + y * b.y; + } + + // Returns the angle from this vector to b, in radians. + T Angle(const Vector2& b) const { + T div = LengthSq() * b.LengthSq(); + OVR_MATH_ASSERT(div != T(0)); + T result = Acos((this->Dot(b)) / sqrt(div)); + return result; + } + + // Return Length of the vector squared. + T LengthSq() const { + return (x * x + y * y); + } + + // Return vector length. + T Length() const { + return sqrt(LengthSq()); + } + + // Returns squared distance between two points represented by vectors. + T DistanceSq(const Vector2& b) const { + return (*this - b).LengthSq(); + } + + // Returns distance between two points represented by vectors. 
+ T Distance(const Vector2& b) const { + return (*this - b).Length(); + } + + // Determine if this a unit vector. + bool IsNormalized() const { + return fabs(LengthSq() - T(1)) < Math::Tolerance(); + } + + // Normalize, convention vector length to 1. + void Normalize() { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + *this *= s; + } + + // Returns normalized (unit) version of the vector without modifying itself. + Vector2 Normalized() const { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + return *this * s; + } + + // Linearly interpolates from this vector to another. + // Factor should be between 0.0 and 1.0, with 0 giving full value to this. + Vector2 Lerp(const Vector2& b, T f) const { + return *this * (T(1) - f) + b * f; + } + + // Projects this vector onto the argument; in other words, + // A.Project(B) returns projection of vector A onto B. + Vector2 ProjectTo(const Vector2& b) const { + T l2 = b.LengthSq(); + OVR_MATH_ASSERT(l2 != T(0)); + return b * (Dot(b) / l2); + } + + // returns true if vector b is clockwise from this vector + bool IsClockwise(const Vector2& b) const { + return (x * b.y - y * b.x) < 0; + } +}; + +typedef Vector2 Vector2f; +typedef Vector2 Vector2d; +typedef Vector2 Vector2i; + +typedef Vector2 Point2f; +typedef Vector2 Point2d; +typedef Vector2 Point2i; + +//------------------------------------------------------------------------------------- +// ***** Vector3<> - 3D vector of {x, y, z} + +// +// Vector3f (Vector3d) represents a 3-dimensional vector or point in space, +// consisting of coordinates x, y and z. + +template +class Vector3 { + public: + typedef T ElementType; + static const size_t ElementCount = 3; + + T x, y, z; + + // FIXME: default initialization of a vector class can be very expensive in a full-blown + // application. A few hundred thousand vector constructions is not unlikely and can add + // up to milliseconds of time on processors like the PS3 PPU. + Vector3() : x(0), y(0), z(0) {} + Vector3(T x_, T y_, T z_ = 0) : x(x_), y(y_), z(z_) {} + explicit Vector3(T s) : x(s), y(s), z(s) {} + explicit Vector3(const Vector3::OtherFloatType>& src) + : x((T)src.x), y((T)src.y), z((T)src.z) {} + + static Vector3 Zero() { + return Vector3(0, 0, 0); + } + + // C-interop support. + typedef typename CompatibleTypes>::Type CompatibleType; + + Vector3(const CompatibleType& s) : x(s.x), y(s.y), z(s.z) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT( + sizeof(Vector3) == sizeof(CompatibleType), "sizeof(Vector3) failure"); + return reinterpret_cast(*this); + } + + bool operator==(const Vector3& b) const { + return x == b.x && y == b.y && z == b.z; + } + bool operator!=(const Vector3& b) const { + return x != b.x || y != b.y || z != b.z; + } + + Vector3 operator+(const Vector3& b) const { + return Vector3(x + b.x, y + b.y, z + b.z); + } + Vector3& operator+=(const Vector3& b) { + x += b.x; + y += b.y; + z += b.z; + return *this; + } + Vector3 operator-(const Vector3& b) const { + return Vector3(x - b.x, y - b.y, z - b.z); + } + Vector3& operator-=(const Vector3& b) { + x -= b.x; + y -= b.y; + z -= b.z; + return *this; + } + Vector3 operator-() const { + return Vector3(-x, -y, -z); + } + + // Scalar multiplication/division scales vector. 
+ Vector3 operator*(T s) const { + return Vector3(x * s, y * s, z * s); + } + Vector3& operator*=(T s) { + x *= s; + y *= s; + z *= s; + return *this; + } + + Vector3 operator/(T s) const { + T rcp = T(1) / s; + return Vector3(x * rcp, y * rcp, z * rcp); + } + Vector3& operator/=(T s) { + T rcp = T(1) / s; + x *= rcp; + y *= rcp; + z *= rcp; + return *this; + } + + static Vector3 Min(const Vector3& a, const Vector3& b) { + return Vector3((a.x < b.x) ? a.x : b.x, (a.y < b.y) ? a.y : b.y, (a.z < b.z) ? a.z : b.z); + } + static Vector3 Max(const Vector3& a, const Vector3& b) { + return Vector3((a.x > b.x) ? a.x : b.x, (a.y > b.y) ? a.y : b.y, (a.z > b.z) ? a.z : b.z); + } + + Vector3 Clamped(T maxMag) const { + T magSquared = LengthSq(); + if (magSquared <= Sqr(maxMag)) + return *this; + else + return *this * (maxMag / sqrt(magSquared)); + } + + // Compare two vectors for equality with tolerance. Returns true if vectors match within + // tolerance. + bool IsEqual(const Vector3& b, T tolerance = Math::Tolerance()) const { + return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance) && + (fabs(b.z - z) <= tolerance); + } + bool Compare(const Vector3& b, T tolerance = Math::Tolerance()) const { + return IsEqual(b, tolerance); + } + + T& operator[](int idx) { + OVR_MATH_ASSERT(0 <= idx && idx < 3); + return *(&x + idx); + } + + const T& operator[](int idx) const { + OVR_MATH_ASSERT(0 <= idx && idx < 3); + return *(&x + idx); + } + + // Entrywise product of two vectors + Vector3 EntrywiseMultiply(const Vector3& b) const { + return Vector3(x * b.x, y * b.y, z * b.z); + } + + // Multiply and divide operators do entry-wise math + Vector3 operator*(const Vector3& b) const { + return Vector3(x * b.x, y * b.y, z * b.z); + } + + Vector3 operator/(const Vector3& b) const { + return Vector3(x / b.x, y / b.y, z / b.z); + } + + // Dot product + // Used to calculate angle q between two vectors among other things, + // as (A dot B) = |a||b|cos(q). + T Dot(const Vector3& b) const { + return x * b.x + y * b.y + z * b.z; + } + + // Compute cross product, which generates a normal vector. + // Direction vector can be determined by right-hand rule: Pointing index finder in + // direction a and middle finger in direction b, thumb will point in a.Cross(b). + Vector3 Cross(const Vector3& b) const { + return Vector3(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); + } + + // Returns the angle from this vector to b, in radians. + T Angle(const Vector3& b) const { + T div = LengthSq() * b.LengthSq(); + OVR_MATH_ASSERT(div != T(0)); + T result = Acos((this->Dot(b)) / sqrt(div)); + return result; + } + + // Return Length of the vector squared. + T LengthSq() const { + return (x * x + y * y + z * z); + } + + // Return vector length. + T Length() const { + return (T)sqrt(LengthSq()); + } + + // Returns squared distance between two points represented by vectors. + T DistanceSq(Vector3 const& b) const { + return (*this - b).LengthSq(); + } + + // Returns distance between two points represented by vectors. + T Distance(Vector3 const& b) const { + return (*this - b).Length(); + } + + bool IsNormalized() const { + return fabs(LengthSq() - T(1)) < Math::Tolerance(); + } + + // Normalize, convention vector length to 1. + void Normalize() { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + *this *= s; + } + + // Returns normalized (unit) version of the vector without modifying itself. 
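//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) using the Vector3 arithmetic,
// Cross() and Normalized() members of this class. The function name is made
// up for the example.
static OVR::Vector3f ExampleTriangleNormal(
    const OVR::Vector3f& a, const OVR::Vector3f& b, const OVR::Vector3f& c)
{
    // Right-handed normal of triangle (a, b, c); a degenerate triangle yields
    // the zero vector because Normalized() leaves zero-length input unchanged.
    OVR::Vector3f n = (b - a).Cross(c - a);
    return n.Normalized();
}
//--------------------------------------------------------------------------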
+ Vector3 Normalized() const { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + return *this * s; + } + + // Linearly interpolates from this vector to another. + // Factor should be between 0.0 and 1.0, with 0 giving full value to this. + Vector3 Lerp(const Vector3& b, T f) const { + return *this * (T(1) - f) + b * f; + } + + // Projects this vector onto the argument; in other words, + // A.Project(B) returns projection of vector A onto B. + Vector3 ProjectTo(const Vector3& b) const { + T l2 = b.LengthSq(); + OVR_MATH_ASSERT(l2 != T(0)); + return b * (Dot(b) / l2); + } + + // Projects this vector onto a plane defined by a normal vector + Vector3 ProjectToPlane(const Vector3& normal) const { + return *this - this->ProjectTo(normal); + } + + bool IsNan() const { + return !isfinite(x + y + z); + } + bool IsFinite() const { + return isfinite(x + y + z); + } +}; + +typedef Vector3 Vector3f; +typedef Vector3 Vector3d; +typedef Vector3 Vector3i; + +OVR_MATH_STATIC_ASSERT((sizeof(Vector3f) == 3 * sizeof(float)), "sizeof(Vector3f) failure"); +OVR_MATH_STATIC_ASSERT((sizeof(Vector3d) == 3 * sizeof(double)), "sizeof(Vector3d) failure"); +OVR_MATH_STATIC_ASSERT((sizeof(Vector3i) == 3 * sizeof(int32_t)), "sizeof(Vector3i) failure"); + +typedef Vector3 Point3f; +typedef Vector3 Point3d; +typedef Vector3 Point3i; + +//------------------------------------------------------------------------------------- +// ***** Vector4<> - 4D vector of {x, y, z, w} + +// +// Vector4f (Vector4d) represents a 3-dimensional vector or point in space, +// consisting of coordinates x, y, z and w. + +template +class Vector4 { + public: + typedef T ElementType; + static const size_t ElementCount = 4; + + T x, y, z, w; + + // FIXME: default initialization of a vector class can be very expensive in a full-blown + // application. A few hundred thousand vector constructions is not unlikely and can add + // up to milliseconds of time on processors like the PS3 PPU. + Vector4() : x(0), y(0), z(0), w(0) {} + Vector4(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) {} + explicit Vector4(T s) : x(s), y(s), z(s), w(s) {} + explicit Vector4(const Vector3& v, const T w_ = T(1)) : x(v.x), y(v.y), z(v.z), w(w_) {} + explicit Vector4(const Vector4::OtherFloatType>& src) + : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) {} + + static Vector4 Zero() { + return Vector4(0, 0, 0, 0); + } + + // C-interop support. 
+ typedef typename CompatibleTypes>::Type CompatibleType; + + Vector4(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT( + sizeof(Vector4) == sizeof(CompatibleType), "sizeof(Vector4) failure"); + return reinterpret_cast(*this); + } + + Vector4& operator=(const Vector3& other) { + x = other.x; + y = other.y; + z = other.z; + w = 1; + return *this; + } + bool operator==(const Vector4& b) const { + return x == b.x && y == b.y && z == b.z && w == b.w; + } + bool operator!=(const Vector4& b) const { + return x != b.x || y != b.y || z != b.z || w != b.w; + } + + Vector4 operator+(const Vector4& b) const { + return Vector4(x + b.x, y + b.y, z + b.z, w + b.w); + } + Vector4& operator+=(const Vector4& b) { + x += b.x; + y += b.y; + z += b.z; + w += b.w; + return *this; + } + Vector4 operator-(const Vector4& b) const { + return Vector4(x - b.x, y - b.y, z - b.z, w - b.w); + } + Vector4& operator-=(const Vector4& b) { + x -= b.x; + y -= b.y; + z -= b.z; + w -= b.w; + return *this; + } + Vector4 operator-() const { + return Vector4(-x, -y, -z, -w); + } + + // Scalar multiplication/division scales vector. + Vector4 operator*(T s) const { + return Vector4(x * s, y * s, z * s, w * s); + } + Vector4& operator*=(T s) { + x *= s; + y *= s; + z *= s; + w *= s; + return *this; + } + + Vector4 operator/(T s) const { + T rcp = T(1) / s; + return Vector4(x * rcp, y * rcp, z * rcp, w * rcp); + } + Vector4& operator/=(T s) { + T rcp = T(1) / s; + x *= rcp; + y *= rcp; + z *= rcp; + w *= rcp; + return *this; + } + + static Vector4 Min(const Vector4& a, const Vector4& b) { + return Vector4( + (a.x < b.x) ? a.x : b.x, + (a.y < b.y) ? a.y : b.y, + (a.z < b.z) ? a.z : b.z, + (a.w < b.w) ? a.w : b.w); + } + static Vector4 Max(const Vector4& a, const Vector4& b) { + return Vector4( + (a.x > b.x) ? a.x : b.x, + (a.y > b.y) ? a.y : b.y, + (a.z > b.z) ? a.z : b.z, + (a.w > b.w) ? a.w : b.w); + } + + Vector4 Clamped(T maxMag) const { + T magSquared = LengthSq(); + if (magSquared <= Sqr(maxMag)) + return *this; + else + return *this * (maxMag / sqrt(magSquared)); + } + + // Compare two vectors for equality with tolerance. Returns true if vectors match within + // tolerance. + bool IsEqual(const Vector4& b, T tolerance = Math::Tolerance()) const { + return (fabs(b.x - x) <= tolerance) && (fabs(b.y - y) <= tolerance) && + (fabs(b.z - z) <= tolerance) && (fabs(b.w - w) <= tolerance); + } + bool Compare(const Vector4& b, T tolerance = Math::Tolerance()) const { + return IsEqual(b, tolerance); + } + + T& operator[](int idx) { + OVR_MATH_ASSERT(0 <= idx && idx < 4); + return *(&x + idx); + } + + const T& operator[](int idx) const { + OVR_MATH_ASSERT(0 <= idx && idx < 4); + return *(&x + idx); + } + + // Entry wise product of two vectors + Vector4 EntrywiseMultiply(const Vector4& b) const { + return Vector4(x * b.x, y * b.y, z * b.z, w * b.w); + } + + // Multiply and divide operators do entry-wise math + Vector4 operator*(const Vector4& b) const { + return Vector4(x * b.x, y * b.y, z * b.z, w * b.w); + } + + Vector4 operator/(const Vector4& b) const { + return Vector4(x / b.x, y / b.y, z / b.z, w / b.w); + } + + // Dot product + T Dot(const Vector4& b) const { + return x * b.x + y * b.y + z * b.z + w * b.w; + } + + // Return Length of the vector squared. + T LengthSq() const { + return (x * x + y * y + z * z + w * w); + } + + // Return vector length. 
+ T Length() const { + return sqrt(LengthSq()); + } + + bool IsNormalized() const { + return fabs(LengthSq() - T(1)) < Math::Tolerance(); + } + + // Normalize, convention vector length to 1. + void Normalize() { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + *this *= s; + } + + // Returns normalized (unit) version of the vector without modifying itself. + Vector4 Normalized() const { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + return *this * s; + } + + // Linearly interpolates from this vector to another. + // Factor should be between 0.0 and 1.0, with 0 giving full value to this. + Vector4 Lerp(const Vector4& b, T f) const { + return *this * (T(1) - f) + b * f; + } +}; + +typedef Vector4 Vector4f; +typedef Vector4 Vector4d; +typedef Vector4 Vector4i; + +//------------------------------------------------------------------------------------- +// ***** Bounds3 + +// Bounds class used to describe a 3D axis aligned bounding box. + +template +class Bounds3 { + public: + Vector3 b[2]; + + Bounds3() { + Clear(); + } + + Bounds3(const Vector3& mins, const Vector3& maxs) { + b[0] = mins; + b[1] = maxs; + } + + void Clear() { + b[0].x = b[0].y = b[0].z = Math::MaxValue(); + b[1].x = b[1].y = b[1].z = -Math::MaxValue(); + } + + void AddPoint(const Vector3& v) { + b[0].x = (b[0].x < v.x ? b[0].x : v.x); + b[0].y = (b[0].y < v.y ? b[0].y : v.y); + b[0].z = (b[0].z < v.z ? b[0].z : v.z); + b[1].x = (v.x < b[1].x ? b[1].x : v.x); + b[1].y = (v.y < b[1].y ? b[1].y : v.y); + b[1].z = (v.z < b[1].z ? b[1].z : v.z); + } + + bool Excludes(const Vector3& v) const { + bool testing = false; + for (int32_t t = 0; t < 3; ++t) { + testing |= v[t] > b[1][t]; + testing |= v[t] < b[0][t]; + } + return testing; + } + + // exludes, ignoring vertical + bool ExcludesXZ(const Vector3& v) const { + bool testing = false; + testing |= v[0] > b[1][0]; + testing |= v[0] < b[0][0]; + testing |= v[2] > b[1][2]; + testing |= v[2] < b[0][2]; + return testing; + } + + bool Excludes(const Bounds3& bounds) const { + bool testing = false; + for (int32_t t = 0; t < 3; ++t) { + testing |= bounds.b[0][t] > b[1][t]; + testing |= bounds.b[1][t] < b[0][t]; + } + return testing; + } + + const Vector3& GetMins() const { + return b[0]; + } + const Vector3& GetMaxs() const { + return b[1]; + } + + Vector3& GetMins() { + return b[0]; + } + Vector3& GetMaxs() { + return b[1]; + } +}; + +typedef Bounds3 Bounds3f; +typedef Bounds3 Bounds3d; + +//------------------------------------------------------------------------------------- +// ***** Size + +// Size class represents 2D size with Width, Height components. +// Used to describe distentions of render targets, etc. + +template +class Size { + public: + T w, h; + + Size() : w(0), h(0) {} + Size(T w_, T h_) : w(w_), h(h_) {} + explicit Size(T s) : w(s), h(s) {} + explicit Size(const Size::OtherFloatType>& src) : w((T)src.w), h((T)src.h) {} + + // C-interop support. 
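//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) for the Bounds3 class defined
// above: grow an axis-aligned box from a point set, then test a point
// against it. The function name is made up for the example.
static bool ExamplePointOutsideBounds(
    const OVR::Vector3f points[], int count, const OVR::Vector3f& p)
{
    OVR::Bounds3f box; // default-constructed bounds start cleared (inverted extents)
    for (int i = 0; i < count; ++i)
        box.AddPoint(points[i]);
    return box.Excludes(p); // true when p lies outside the accumulated box
}
//--------------------------------------------------------------------------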
+ typedef typename CompatibleTypes>::Type CompatibleType; + + Size(const CompatibleType& s) : w(s.w), h(s.h) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT(sizeof(Size) == sizeof(CompatibleType), "sizeof(Size) failure"); + return reinterpret_cast(*this); + } + + bool operator==(const Size& b) const { + return w == b.w && h == b.h; + } + bool operator!=(const Size& b) const { + return w != b.w || h != b.h; + } + + Size operator+(const Size& b) const { + return Size(w + b.w, h + b.h); + } + Size& operator+=(const Size& b) { + w += b.w; + h += b.h; + return *this; + } + Size operator-(const Size& b) const { + return Size(w - b.w, h - b.h); + } + Size& operator-=(const Size& b) { + w -= b.w; + h -= b.h; + return *this; + } + Size operator-() const { + return Size(-w, -h); + } + Size operator*(const Size& b) const { + return Size(w * b.w, h * b.h); + } + Size& operator*=(const Size& b) { + w *= b.w; + h *= b.h; + return *this; + } + Size operator/(const Size& b) const { + return Size(w / b.w, h / b.h); + } + Size& operator/=(const Size& b) { + w /= b.w; + h /= b.h; + return *this; + } + + // Scalar multiplication/division scales both components. + Size operator*(T s) const { + return Size(w * s, h * s); + } + Size& operator*=(T s) { + w *= s; + h *= s; + return *this; + } + Size operator/(T s) const { + return Size(w / s, h / s); + } + Size& operator/=(T s) { + w /= s; + h /= s; + return *this; + } + + static Size Min(const Size& a, const Size& b) { + return Size((a.w < b.w) ? a.w : b.w, (a.h < b.h) ? a.h : b.h); + } + static Size Max(const Size& a, const Size& b) { + return Size((a.w > b.w) ? a.w : b.w, (a.h > b.h) ? a.h : b.h); + } + + T Area() const { + return w * h; + } + + inline Vector2 ToVector() const { + return Vector2(w, h); + } +}; + +typedef Size Sizei; +typedef Size Sizeu; +typedef Size Sizef; +typedef Size Sized; + +//----------------------------------------------------------------------------------- +// ***** Rect + +// Rect describes a rectangular area for rendering, that includes position and size. +template +class Rect { + public: + T x, y; + T w, h; + + Rect() {} + Rect(T x1, T y1, T w1, T h1) : x(x1), y(y1), w(w1), h(h1) {} + Rect(const Vector2& pos, const Size& sz) : x(pos.x), y(pos.y), w(sz.w), h(sz.h) {} + Rect(const Size& sz) : x(0), y(0), w(sz.w), h(sz.h) {} + + // C-interop support. + typedef typename CompatibleTypes>::Type CompatibleType; + + Rect(const CompatibleType& s) : x(s.Pos.x), y(s.Pos.y), w(s.Size.w), h(s.Size.h) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT(sizeof(Rect) == sizeof(CompatibleType), "sizeof(Rect) failure"); + return reinterpret_cast(*this); + } + + Vector2 GetPos() const { + return Vector2(x, y); + } + Size GetSize() const { + return Size(w, h); + } + void SetPos(const Vector2& pos) { + x = pos.x; + y = pos.y; + } + void SetSize(const Size& sz) { + w = sz.w; + h = sz.h; + } + + bool operator==(const Rect& vp) const { + return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h); + } + bool operator!=(const Rect& vp) const { + return !operator==(vp); + } +}; + +typedef Rect Recti; + +//-------------------------------------------------------------------------------------// +// ***** Quat +// +// Quatf represents a quaternion class used for rotations. +// +// Quaternion multiplications are done in right-to-left order, to match the +// behavior of matrices. 
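//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) of the right-to-left composition
// described above: in q = yaw * pitch, the pitch rotation is applied to a
// vector first, then the yaw, matching matrix behavior. The function name
// and angles are made up for the example.
static OVR::Vector3f ExampleComposedRotation(const OVR::Vector3f& v)
{
    OVR::Quat<float> yaw90(OVR::Vector3f(0, 1, 0), MATH_FLOAT_PIOVER2);
    OVR::Quat<float> pitch45(OVR::Vector3f(1, 0, 0), MATH_FLOAT_PIOVER4);
    OVR::Quat<float> combined = yaw90 * pitch45; // pitch first, then yaw
    return combined.Rotate(v);
}
//--------------------------------------------------------------------------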
+ +template +class Quat { + public: + typedef T ElementType; + static const size_t ElementCount = 4; + + // x,y,z = axis*sin(angle), w = cos(angle) + T x, y, z, w; + + Quat() : x(0), y(0), z(0), w(1) {} + Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) {} + explicit Quat(const Quat::OtherFloatType>& src) + : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) { + // NOTE: Converting a normalized Quat to Quat + // will generally result in an un-normalized quaternion. + // But we don't normalize here in case the quaternion + // being converted is not a normalized rotation quaternion. + } + + typedef typename CompatibleTypes>::Type CompatibleType; + + // C-interop support. + Quat(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {} + + operator CompatibleType() const { + CompatibleType result; + result.x = x; + result.y = y; + result.z = z; + result.w = w; + return result; + } + + // Constructs quaternion for rotation around the axis by an angle. + Quat(const Vector3& axis, T angle) { + // Make sure we don't divide by zero. + if (axis.LengthSq() == T(0)) { + // Assert if the axis is zero, but the angle isn't + OVR_MATH_ASSERT(angle == T(0)); + x = y = z = T(0); + w = T(1); + return; + } + + Vector3 unitAxis = axis.Normalized(); + T sinHalfAngle = sin(angle * T(0.5)); + + w = cos(angle * T(0.5)); + x = unitAxis.x * sinHalfAngle; + y = unitAxis.y * sinHalfAngle; + z = unitAxis.z * sinHalfAngle; + } + + // Constructs quaternion for rotation around one of the coordinate axis by an angle. + Quat(Axis A, T angle, RotateDirection d = Rotate_CCW, HandedSystem s = Handed_R) { + T sinHalfAngle = s * d * sin(angle * T(0.5)); + T v[3]; + v[0] = v[1] = v[2] = T(0); + v[A] = sinHalfAngle; + + w = cos(angle * T(0.5)); + x = v[0]; + y = v[1]; + z = v[2]; + } + + Quat operator-() { + return Quat(-x, -y, -z, -w); + } // unary minus + + static Quat Identity() { + return Quat(0, 0, 0, 1); + } + + // Compute axis and angle from quaternion + void GetAxisAngle(Vector3* axis, T* angle) const { + if (x * x + y * y + z * z > Math::Tolerance() * Math::Tolerance()) { + *axis = Vector3(x, y, z).Normalized(); + *angle = 2 * Acos(w); + if (*angle > ((T)MATH_DOUBLE_PI)) // Reduce the magnitude of the angle, if necessary + { + *angle = ((T)MATH_DOUBLE_TWOPI) - *angle; + *axis = *axis * (-1); + } + } else { + *axis = Vector3(1, 0, 0); + *angle = T(0); + } + } + + // Convert a quaternion to a rotation vector, also known as + // Rodrigues vector, AxisAngle vector, SORA vector, exponential map. + // A rotation vector describes a rotation about an axis: + // the axis of rotation is the vector normalized, + // the angle of rotation is the magnitude of the vector. 
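//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition): a rotation vector is axis * angle
// packed into one Vector3, so a rotation can be scaled by converting with
// ToRotationVector()/FromRotationVector() below. q is assumed to be a unit
// rotation quaternion; the function name is made up for the example.
static OVR::Quat<float> ExampleScaleRotation(const OVR::Quat<float>& q, float factor)
{
    OVR::Vector3f rv = q.ToRotationVector(); // axis * angle
    return OVR::Quat<float>::FromRotationVector(rv * factor);
}
//--------------------------------------------------------------------------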
+ Vector3 ToRotationVector() const { + // OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + T s = T(0); + T sinHalfAngle = sqrt(x * x + y * y + z * z); + if (sinHalfAngle > T(0)) { + T cosHalfAngle = w; + T halfAngle = atan2(sinHalfAngle, cosHalfAngle); + + // Ensure minimum rotation magnitude + if (cosHalfAngle < 0) + halfAngle -= T(MATH_DOUBLE_PI); + + s = T(2) * halfAngle / sinHalfAngle; + } + return Vector3(x * s, y * s, z * s); + } + + // Faster version of the above, optimized for use with small rotations, where rotation angle ~= + // sin(angle) + inline OVR::Vector3 FastToRotationVector() const { + OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + T s; + T sinHalfSquared = x * x + y * y + z * z; + if (sinHalfSquared < T(.0037)) // =~ sin(7/2 degrees)^2 + { + // Max rotation magnitude error is about .062% at 7 degrees rotation, or about .0043 degrees + s = T(2) * Sign(w); + } else { + T sinHalfAngle = sqrt(sinHalfSquared); + T cosHalfAngle = w; + T halfAngle = atan2(sinHalfAngle, cosHalfAngle); + + // Ensure minimum rotation magnitude + if (cosHalfAngle < 0) + halfAngle -= T(MATH_DOUBLE_PI); + + s = T(2) * halfAngle / sinHalfAngle; + } + return Vector3(x * s, y * s, z * s); + } + + // Given a rotation vector of form unitRotationAxis * angle, + // returns the equivalent quaternion (unitRotationAxis * sin(angle), cos(Angle)). + static Quat FromRotationVector(const Vector3& v) { + T angleSquared = v.LengthSq(); + T s = T(0); + T c = T(1); + if (angleSquared > T(0)) { + T angle = sqrt(angleSquared); + s = sin(angle * T(0.5)) / angle; // normalize + c = cos(angle * T(0.5)); + } + return Quat(s * v.x, s * v.y, s * v.z, c); + } + + // Faster version of above, optimized for use with small rotation magnitudes, where rotation angle + // =~ sin(angle). + // If normalize is false, small-angle quaternions are returned un-normalized. + inline static Quat FastFromRotationVector(const OVR::Vector3& v, bool normalize = true) { + T s, c; + T angleSquared = v.LengthSq(); + if (angleSquared < T(0.0076)) // =~ (5 degrees*pi/180)^2 + { + s = T(0.5); + c = T(1.0); + // Max rotation magnitude error (after normalization) is about .064% at 5 degrees rotation, or + // .0032 degrees + if (normalize && angleSquared > 0) { + // sin(angle/2)^2 ~= (angle/2)^2 and cos(angle/2)^2 ~= 1 + T invLen = T(1) / sqrt(angleSquared * T(0.25) + T(1)); // normalize + s = s * invLen; + c = c * invLen; + } + } else { + T angle = sqrt(angleSquared); + s = sin(angle * T(0.5)) / angle; + c = cos(angle * T(0.5)); + } + return Quat(s * v.x, s * v.y, s * v.z, c); + } + + // Constructs the quaternion from a rotation matrix + explicit Quat(const Matrix4& m) { + T trace = m.M[0][0] + m.M[1][1] + m.M[2][2]; + + // In almost all cases, the first part is executed. + // However, if the trace is not positive, the other + // cases arise. 
+ if (trace > T(0)) { + T s = sqrt(trace + T(1)) * T(2); // s=4*qw + w = T(0.25) * s; + x = (m.M[2][1] - m.M[1][2]) / s; + y = (m.M[0][2] - m.M[2][0]) / s; + z = (m.M[1][0] - m.M[0][1]) / s; + } else if ((m.M[0][0] > m.M[1][1]) && (m.M[0][0] > m.M[2][2])) { + T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2); + w = (m.M[2][1] - m.M[1][2]) / s; + x = T(0.25) * s; + y = (m.M[0][1] + m.M[1][0]) / s; + z = (m.M[2][0] + m.M[0][2]) / s; + } else if (m.M[1][1] > m.M[2][2]) { + T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy + w = (m.M[0][2] - m.M[2][0]) / s; + x = (m.M[0][1] + m.M[1][0]) / s; + y = T(0.25) * s; + z = (m.M[1][2] + m.M[2][1]) / s; + } else { + T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz + w = (m.M[1][0] - m.M[0][1]) / s; + x = (m.M[0][2] + m.M[2][0]) / s; + y = (m.M[1][2] + m.M[2][1]) / s; + z = T(0.25) * s; + } + OVR_MATH_ASSERT(IsNormalized()); // Ensure input matrix is orthogonal + } + + // Constructs the quaternion from a rotation matrix + explicit Quat(const Matrix3& m) { + T trace = m.M[0][0] + m.M[1][1] + m.M[2][2]; + + // In almost all cases, the first part is executed. + // However, if the trace is not positive, the other + // cases arise. + if (trace > T(0)) { + T s = sqrt(trace + T(1)) * T(2); // s=4*qw + w = T(0.25) * s; + x = (m.M[2][1] - m.M[1][2]) / s; + y = (m.M[0][2] - m.M[2][0]) / s; + z = (m.M[1][0] - m.M[0][1]) / s; + } else if ((m.M[0][0] > m.M[1][1]) && (m.M[0][0] > m.M[2][2])) { + T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2); + w = (m.M[2][1] - m.M[1][2]) / s; + x = T(0.25) * s; + y = (m.M[0][1] + m.M[1][0]) / s; + z = (m.M[2][0] + m.M[0][2]) / s; + } else if (m.M[1][1] > m.M[2][2]) { + T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy + w = (m.M[0][2] - m.M[2][0]) / s; + x = (m.M[0][1] + m.M[1][0]) / s; + y = T(0.25) * s; + z = (m.M[1][2] + m.M[2][1]) / s; + } else { + T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz + w = (m.M[1][0] - m.M[0][1]) / s; + x = (m.M[0][2] + m.M[2][0]) / s; + y = (m.M[1][2] + m.M[2][1]) / s; + z = T(0.25) * s; + } + OVR_MATH_ASSERT(IsNormalized()); // Ensure input matrix is orthogonal + } + + // MERGE_MOBILE_SDK + // Constructs a quaternion that rotates 'from' to line up with 'to'. 
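//--------------------------------------------------------------------------
// Illustrative sketch (editor's addition) for the 'from'/'to' constructor
// below: build the shortest-arc rotation that takes one direction onto
// another, here aligning the Oculus forward axis (-Z in the right-handed
// convention described earlier) with a target direction. The function name
// is made up for the example.
static OVR::Quat<float> ExampleLookRotation(const OVR::Vector3f& targetDirection)
{
    const OVR::Vector3f forward(0.0f, 0.0f, -1.0f);
    return OVR::Quat<float>(forward, targetDirection.Normalized());
}
//--------------------------------------------------------------------------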
+ explicit Quat(const Vector3& from, const Vector3& to) { + const T cx = from.y * to.z - from.z * to.y; + const T cy = from.z * to.x - from.x * to.z; + const T cz = from.x * to.y - from.y * to.x; + const T dot = from.x * to.x + from.y * to.y + from.z * to.z; + const T crossLengthSq = cx * cx + cy * cy + cz * cz; + const T magnitude = static_cast(sqrt(crossLengthSq + dot * dot)); + const T cw = dot + magnitude; + if (cw < Math::SmallestNonDenormal()) { + const T sx = to.y * to.y + to.z * to.z; + const T sz = to.x * to.x + to.y * to.y; + if (sx > sz) { + const T rcpLength = RcpSqrt(sx); + x = T(0); + y = to.z * rcpLength; + z = -to.y * rcpLength; + w = T(0); + } else { + const T rcpLength = RcpSqrt(sz); + x = to.y * rcpLength; + y = -to.x * rcpLength; + z = T(0); + w = T(0); + } + return; + } + const T rcpLength = RcpSqrt(crossLengthSq + cw * cw); + x = cx * rcpLength; + y = cy * rcpLength; + z = cz * rcpLength; + w = cw * rcpLength; + } + // MERGE_MOBILE_SDK + + bool operator==(const Quat& b) const { + return x == b.x && y == b.y && z == b.z && w == b.w; + } + bool operator!=(const Quat& b) const { + return x != b.x || y != b.y || z != b.z || w != b.w; + } + + Quat operator+(const Quat& b) const { + return Quat(x + b.x, y + b.y, z + b.z, w + b.w); + } + Quat& operator+=(const Quat& b) { + w += b.w; + x += b.x; + y += b.y; + z += b.z; + return *this; + } + Quat operator-(const Quat& b) const { + return Quat(x - b.x, y - b.y, z - b.z, w - b.w); + } + Quat& operator-=(const Quat& b) { + w -= b.w; + x -= b.x; + y -= b.y; + z -= b.z; + return *this; + } + + Quat operator*(T s) const { + return Quat(x * s, y * s, z * s, w * s); + } + Quat& operator*=(T s) { + w *= s; + x *= s; + y *= s; + z *= s; + return *this; + } + Quat operator/(T s) const { + T rcp = T(1) / s; + return Quat(x * rcp, y * rcp, z * rcp, w * rcp); + } + Quat& operator/=(T s) { + T rcp = T(1) / s; + w *= rcp; + x *= rcp; + y *= rcp; + z *= rcp; + return *this; + } + + // MERGE_MOBILE_SDK + Vector3 operator*(const Vector3& v) const { + return Rotate(v); + } + // MERGE_MOBILE_SDK + + // Compare two quats for equality within tolerance. Returns true if quats match within tolerance. + bool IsEqual(const Quat& b, T tolerance = Math::Tolerance()) const { + return Abs(Dot(b)) >= T(1) - tolerance; + } + + // Compare two quats for equality within tolerance while checking matching hemispheres. Returns + // true if quats match within tolerance. + bool IsEqualMatchHemisphere(Quat b, T tolerance = Math::Tolerance()) const { + b.EnsureSameHemisphere(*this); + return Abs(Dot(b)) >= T(1) - tolerance; + } + + static T Abs(const T v) { + return (v >= 0) ? v : -v; + } + + // Get Imaginary part vector + Vector3 Imag() const { + return Vector3(x, y, z); + } + + // Get quaternion length. + T Length() const { + return sqrt(LengthSq()); + } + + // Get quaternion length squared. + T LengthSq() const { + return (x * x + y * y + z * z + w * w); + } + + // Simple Euclidean distance in R^4 (not SLERP distance, but at least respects Haar measure) + T Distance(const Quat& q) const { + T d1 = (*this - q).Length(); + T d2 = (*this + q).Length(); // Antipodal point check + return (d1 < d2) ? d1 : d2; + } + + T DistanceSq(const Quat& q) const { + T d1 = (*this - q).LengthSq(); + T d2 = (*this + q).LengthSq(); // Antipodal point check + return (d1 < d2) ? 
d1 : d2; + } + + T Dot(const Quat& q) const { + return x * q.x + y * q.y + z * q.z + w * q.w; + } + + // Angle between two quaternions in radians + T Angle(const Quat& q) const { + return T(2) * Acos(Abs(Dot(q))); + } + + // Angle of quaternion + T Angle() const { + return T(2) * Acos(Abs(w)); + } + + // Normalize + bool IsNormalized() const { + return fabs(LengthSq() - T(1)) < Math::Tolerance(); + } + + void Normalize() { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + *this *= s; + } + + Quat Normalized() const { + T s = Length(); + if (s != T(0)) + s = T(1) / s; + return *this * s; + } + + inline void EnsureSameHemisphere(const Quat& o) { + if (Dot(o) < T(0)) { + x = -x; + y = -y; + z = -z; + w = -w; + } + } + + // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized. + Quat Conj() const { + return Quat(-x, -y, -z, w); + } + + // Quaternion multiplication. Combines quaternion rotations, performing the one on the + // right hand side first. + Quat operator*(const Quat& b) const { + return Quat( + w * b.x + x * b.w + y * b.z - z * b.y, + w * b.y - x * b.z + y * b.w + z * b.x, + w * b.z + x * b.y - y * b.x + z * b.w, + w * b.w - x * b.x - y * b.y - z * b.z); + } + const Quat& operator*=(const Quat& b) { + *this = *this * b; + return *this; + } + + // + // this^p normalized; same as rotating by this p times. + Quat PowNormalized(T p) const { + Vector3 v; + T a; + GetAxisAngle(&v, &a); + return Quat(v, a * p); + } + + // Compute quaternion that rotates v into alignTo: alignTo = Quat::Align(alignTo, v).Rotate(v). + // NOTE: alignTo and v must be normalized. + static Quat Align(const Vector3& alignTo, const Vector3& v) { + OVR_MATH_ASSERT(alignTo.IsNormalized() && v.IsNormalized()); + Vector3 bisector = (v + alignTo); + bisector.Normalize(); + T cosHalfAngle = v.Dot(bisector); // 0..1 + if (cosHalfAngle > T(0)) { + Vector3 imag = v.Cross(bisector); + return Quat(imag.x, imag.y, imag.z, cosHalfAngle); + } else { + // cosHalfAngle == 0: a 180 degree rotation. + // sinHalfAngle == 1, rotation axis is any axis perpendicular + // to alignTo. Choose axis to include largest magnitude components + if (fabs(v.x) > fabs(v.y)) { + // x or z is max magnitude component + // = Cross(v, (0,1,0)).Normalized(); + T invLen = sqrt(v.x * v.x + v.z * v.z); + if (invLen > T(0)) + invLen = T(1) / invLen; + return Quat(-v.z * invLen, 0, v.x * invLen, 0); + } else { + // y or z is max magnitude component + // = Cross(v, (1,0,0)).Normalized(); + T invLen = sqrt(v.y * v.y + v.z * v.z); + if (invLen > T(0)) + invLen = T(1) / invLen; + return Quat(0, v.z * invLen, -v.y * invLen, 0); + } + } + } + + // Decompose a quat into quat = swing * twist, where twist is a rotation about axis, + // and swing is a rotation perpendicular to axis. + Quat GetSwingTwist(const Vector3& axis, Quat* twist) const { + OVR_MATH_ASSERT(twist); + OVR_MATH_ASSERT(axis.IsNormalized()); + + // Create a normalized quaternion from projection of (x,y,z) onto axis + T d = axis.Dot(Vector3(x, y, z)); + *twist = Quat(axis.x * d, axis.y * d, axis.z * d, w); + T len = twist->Length(); + if (len == 0) + twist->w = T(1); // identity + else + *twist /= len; // normalize + + return *this * twist->Inverted(); + } + + // Normalized linear interpolation of quaternions + // NOTE: This function is a bad approximation of Slerp() + // when the angle between the *this and b is large. + // Use FastSlerp() or Slerp() instead. + Quat Lerp(const Quat& b, T s) const { + return (*this * (T(1) - s) + b * (Dot(b) < 0 ? 
-s : s)).Normalized(); + } + + // Spherical linear interpolation between rotations + Quat Slerp(const Quat& b, T s) const { + Vector3 delta = (b * this->Inverted()).ToRotationVector(); + return (FromRotationVector(delta * s) * *this) + .Normalized(); // normalize so errors don't accumulate + } + + // Spherical linear interpolation: much faster for small rotations, accurate for large rotations. + // See FastTo/FromRotationVector + Quat FastSlerp(const Quat& b, T s) const { + Vector3 delta = (b * this->Inverted()).FastToRotationVector(); + return (FastFromRotationVector(delta * s, false) * *this).Normalized(); + } + + // MERGE_MOBILE_SDK + // FIXME: This is opposite of Lerp for some reason. It goes from 1 to 0 instead of 0 to 1. + // Leaving it as a gift for future generations to deal with. + Quat Nlerp(const Quat& other, T a) const { + T sign = (Dot(other) >= 0.0f) ? 1.0f : -1.0f; + return (*this * sign * a + other * (1 - a)).Normalized(); + } + // MERGE_MOBILE_SDK + + // Rotate transforms vector in a manner that matches Matrix rotations (counter-clockwise, + // assuming negative direction of the axis). Standard formula: q(t) * V * q(t)^-1. + Vector3 Rotate(const Vector3& v) const { + OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + + // rv = q * (v,0) * q' + // Same as rv = v + real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2); + + // uv = 2 * Imag().Cross(v); + T uvx = T(2) * (y * v.z - z * v.y); + T uvy = T(2) * (z * v.x - x * v.z); + T uvz = T(2) * (x * v.y - y * v.x); + + // return v + Real()*uv + Imag().Cross(uv); + return Vector3( + v.x + w * uvx + y * uvz - z * uvy, + v.y + w * uvy + z * uvx - x * uvz, + v.z + w * uvz + x * uvy - y * uvx); + } + + // Rotation by inverse of *this + Vector3 InverseRotate(const Vector3& v) const { + OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + + // rv = q' * (v,0) * q + // Same as rv = v + real * cross(-imag,v)*2 + cross(-imag, cross(-imag,v)*2); + // or rv = v - real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2); + + // uv = 2 * Imag().Cross(v); + T uvx = T(2) * (y * v.z - z * v.y); + T uvy = T(2) * (z * v.x - x * v.z); + T uvz = T(2) * (x * v.y - y * v.x); + + // return v - Real()*uv + Imag().Cross(uv); + return Vector3( + v.x - w * uvx + y * uvz - z * uvy, + v.y - w * uvy + z * uvx - x * uvz, + v.z - w * uvz + x * uvy - y * uvx); + } + + // Inversed quaternion rotates in the opposite direction. + Quat Inverted() const { + return Quat(-x, -y, -z, w); + } + + Quat Inverse() const { + return Quat(-x, -y, -z, w); + } + + // Sets this quaternion to the one rotates in the opposite direction. + void Invert() { + *this = Quat(-x, -y, -z, w); + } + + // Time integration of constant angular velocity over dt + Quat TimeIntegrate(const Vector3& angularVelocity, T dt) const { + // solution is: this * exp( omega*dt/2 ); FromRotationVector(v) gives exp(v*.5). + return (*this * FastFromRotationVector(angularVelocity * dt, false)).Normalized(); + } + + // Time integration of constant angular acceleration and velocity over dt + // These are the first two terms of the "Magnus expansion" of the solution + // + // o = o * exp( W=(W1 + W2 + W3+...) 
* 0.5 ); + // + // omega1 = (omega + omegaDot*dt) + // W1 = (omega + omega1)*dt/2 + // W2 = cross(omega, omega1)/12*dt^2 % (= -cross(omega_dot, omega)/12*dt^3) + // Terms 3 and beyond are vanishingly small: + // W3 = cross(omega_dot, cross(omega_dot, omega))/240*dt^5 + // + Quat TimeIntegrate(const Vector3& angularVelocity, const Vector3& angularAcceleration, T dt) + const { + const Vector3& omega = angularVelocity; + const Vector3& omegaDot = angularAcceleration; + + Vector3 omega1 = (omega + omegaDot * dt); + Vector3 W = ((omega + omega1) + omega.Cross(omega1) * (dt / T(6))) * (dt / T(2)); + + // FromRotationVector(v) is exp(v*.5) + return (*this * FastFromRotationVector(W, false)).Normalized(); + } + + // Decompose rotation into three rotations: + // roll radians about Z axis, then pitch radians about X axis, then yaw radians about Y axis. + // Call with nullptr if a return value is not needed. + void GetYawPitchRoll(T* yaw, T* pitch, T* roll) const { + return GetEulerAngles(yaw, pitch, roll); + } + + // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of + // axis rotations and the specified coordinate system. Right-handed coordinate system + // is the default, with CCW rotations while looking in the negative axis direction. + // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. + // Rotation order is c, b, a: + // rotation c around axis A3 + // is followed by rotation b around axis A2 + // is followed by rotation a around axis A1 + // rotations are CCW or CW (D) in LH or RH coordinate system (S) + // + template + void GetEulerAngles(T* a, T* b, T* c) const { + OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + OVR_MATH_STATIC_ASSERT( + (A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)"); + + T Q[3] = {x, y, z}; // Quaternion components x,y,z + + T ww = w * w; + T Q11 = Q[A1] * Q[A1]; + T Q22 = Q[A2] * Q[A2]; + T Q33 = Q[A3] * Q[A3]; + + T psign = T(-1); + // Determine whether even permutation + if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) + psign = T(1); + + T s2 = psign * T(2) * (psign * w * Q[A2] + Q[A1] * Q[A3]); + + T singularityRadius = Math::SingularityRadius(); + if (s2 < T(-1) + singularityRadius) { // South pole singularity + if (a) + *a = T(0); + if (b) + *b = -S * D * ((T)MATH_DOUBLE_PIOVER2); + if (c) + *c = S * D * atan2(T(2) * (psign * Q[A1] * Q[A2] + w * Q[A3]), ww + Q22 - Q11 - Q33); + } else if (s2 > T(1) - singularityRadius) { // North pole singularity + if (a) + *a = T(0); + if (b) + *b = S * D * ((T)MATH_DOUBLE_PIOVER2); + if (c) + *c = S * D * atan2(T(2) * (psign * Q[A1] * Q[A2] + w * Q[A3]), ww + Q22 - Q11 - Q33); + } else { + if (a) + *a = -S * D * atan2(T(-2) * (w * Q[A1] - psign * Q[A2] * Q[A3]), ww + Q33 - Q11 - Q22); + if (b) + *b = S * D * asin(s2); + if (c) + *c = S * D * atan2(T(2) * (w * Q[A3] - psign * Q[A1] * Q[A2]), ww + Q11 - Q22 - Q33); + } + } + + template + void GetEulerAngles(T* a, T* b, T* c) const { + GetEulerAngles(a, b, c); + } + + template + void GetEulerAngles(T* a, T* b, T* c) const { + GetEulerAngles(a, b, c); + } + + // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of + // axis rotations and the specified coordinate system. Right-handed coordinate system + // is the default, with CCW rotations while looking in the negative axis direction. + // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. 
+ // rotation a around axis A1 + // is followed by rotation b around axis A2 + // is followed by rotation c around axis A1 + // Rotations are CCW or CW (D) in LH or RH coordinate system (S) + template + void GetEulerAnglesABA(T* a, T* b, T* c) const { + OVR_MATH_ASSERT(IsNormalized()); // If this fires, caller has a quat math bug + OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2"); + + T Q[3] = {x, y, z}; // Quaternion components + + // Determine the missing axis that was not supplied + int m = 3 - A1 - A2; + + T ww = w * w; + T Q11 = Q[A1] * Q[A1]; + T Q22 = Q[A2] * Q[A2]; + T Qmm = Q[m] * Q[m]; + + T psign = T(-1); + if ((A1 + 1) % 3 == A2) // Determine whether even permutation + { + psign = T(1); + } + + T c2 = ww + Q11 - Q22 - Qmm; + T singularityRadius = Math::SingularityRadius(); + if (c2 < T(-1) + singularityRadius) { // South pole singularity + if (a) + *a = T(0); + if (b) + *b = S * D * ((T)MATH_DOUBLE_PI); + if (c) + *c = S * D * atan2(T(2) * (w * Q[A1] - psign * Q[A2] * Q[m]), ww + Q22 - Q11 - Qmm); + } else if (c2 > T(1) - singularityRadius) { // North pole singularity + if (a) + *a = T(0); + if (b) + *b = T(0); + if (c) + *c = S * D * atan2(T(2) * (w * Q[A1] - psign * Q[A2] * Q[m]), ww + Q22 - Q11 - Qmm); + } else { + if (a) + *a = S * D * atan2(psign * w * Q[m] + Q[A1] * Q[A2], w * Q[A2] - psign * Q[A1] * Q[m]); + if (b) + *b = S * D * acos(c2); + if (c) + *c = S * D * atan2(-psign * w * Q[m] + Q[A1] * Q[A2], w * Q[A2] + psign * Q[A1] * Q[m]); + } + } + + bool IsNan() const { + return !isfinite(x + y + z + w); + } + bool IsFinite() const { + return isfinite(x + y + z + w); + } +}; + +typedef Quat Quatf; +typedef Quat Quatd; + +OVR_MATH_STATIC_ASSERT((sizeof(Quatf) == 4 * sizeof(float)), "sizeof(Quatf) failure"); +OVR_MATH_STATIC_ASSERT((sizeof(Quatd) == 4 * sizeof(double)), "sizeof(Quatd) failure"); + +//------------------------------------------------------------------------------------- +// ***** Pose +// +// Position and orientation combined. +// +// This structure needs to be the same size and layout on 32-bit and 64-bit arch. +// Update OVR_PadCheck.cpp when updating this object. 
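GetYawPitchRoll above forwards to GetEulerAngles with the Y, X, Z axis order, i.e. the rotation is read back as roll about Z, then pitch about X, then yaw about Y, with the near-singular pitch = ±90° cases handled separately. A short usage sketch, assuming OVR_Math.h and its OVR namespace are available on the include path:

#include <cstdio>
#include "Extras/OVR_Math.h" // adjust to wherever OVR_Math.h sits on your include path

int main() {
  using namespace OVR;
  const float kPi = 3.14159265f;

  // 45 degrees of yaw about the +Y axis.
  Quatf q(Vector3f(0.0f, 1.0f, 0.0f), kPi / 4.0f);

  float yaw = 0.0f, pitch = 0.0f, roll = 0.0f;
  q.GetYawPitchRoll(&yaw, &pitch, &roll);

  // Expect roughly yaw = 0.785, pitch = 0, roll = 0 (radians).
  std::printf("yaw=%.3f pitch=%.3f roll=%.3f\n", yaw, pitch, roll);
  return 0;
}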
+template +class Pose { + public: + typedef typename CompatibleTypes>::Type CompatibleType; + + Pose() {} + Pose(const Quat& orientation, const Vector3& pos) + : Rotation(orientation), Translation(pos) {} + Pose(const Pose& s) : Rotation(s.Rotation), Translation(s.Translation) {} + Pose(const Matrix3& R, const Vector3& t) : Rotation((Quat)R), Translation(t) {} + Pose(const CompatibleType& s) : Rotation(s.Orientation), Translation(s.Position) {} + + explicit Pose(const Pose::OtherFloatType>& s) + : Rotation(s.Rotation), Translation(s.Translation) { + // Ensure normalized rotation if converting from float to double + if (sizeof(T) > sizeof(typename Math::OtherFloatType)) + Rotation.Normalize(); + } + + static Pose Identity() { + return Pose(Quat(0, 0, 0, 1), Vector3(0, 0, 0)); + } + + void SetIdentity() { + Rotation = Quat(0, 0, 0, 1); + Translation = Vector3(0, 0, 0); + } + + // used to make things obviously broken if someone tries to use the value + void SetInvalid() { + Rotation = Quat(NAN, NAN, NAN, NAN); + Translation = Vector3(NAN, NAN, NAN); + } + + bool IsEqual(const Pose& b, T tolerance = Math::Tolerance()) const { + return Translation.IsEqual(b.Translation, tolerance) && Rotation.IsEqual(b.Rotation, tolerance); + } + + bool IsEqualMatchHemisphere(const Pose& b, T tolerance = Math::Tolerance()) const { + return Translation.IsEqual(b.Translation, tolerance) && + Rotation.IsEqualMatchHemisphere(b.Rotation, tolerance); + } + + operator typename CompatibleTypes>::Type() const { + typename CompatibleTypes>::Type result; + result.Orientation = Rotation; + result.Position = Translation; + return result; + } + + Quat Rotation; + Vector3 Translation; + + OVR_MATH_STATIC_ASSERT( + (sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float)), + "(sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float))"); + + void ToArray(T* arr) const { + T temp[7] = {Rotation.x, + Rotation.y, + Rotation.z, + Rotation.w, + Translation.x, + Translation.y, + Translation.z}; + for (int i = 0; i < 7; i++) + arr[i] = temp[i]; + } + + static Pose FromArray(const T* v) { + Quat rotation(v[0], v[1], v[2], v[3]); + Vector3 translation(v[4], v[5], v[6]); + // Ensure rotation is normalized, in case it was originally a float, stored in a .json file, + // etc. + return Pose(rotation.Normalized(), translation); + } + + Vector3 Rotate(const Vector3& v) const { + return Rotation.Rotate(v); + } + + Vector3 InverseRotate(const Vector3& v) const { + return Rotation.InverseRotate(v); + } + + Vector3 Translate(const Vector3& v) const { + return v + Translation; + } + + Vector3 Transform(const Vector3& v) const { + return Rotate(v) + Translation; + } + + Vector3 InverseTransform(const Vector3& v) const { + return InverseRotate(v - Translation); + } + + Vector3 TransformNormal(const Vector3& v) const { + return Rotate(v); + } + + Vector3 InverseTransformNormal(const Vector3& v) const { + return InverseRotate(v); + } + + Vector3 Apply(const Vector3& v) const { + return Transform(v); + } + + Pose operator*(const Pose& other) const { + return Pose(Rotation * other.Rotation, Apply(other.Translation)); + } + + Pose Inverted() const { + Quat inv = Rotation.Inverted(); + return Pose(inv, inv.Rotate(-Translation)); + } + + // Interpolation between two poses: translation is interpolated with Lerp(), + // and rotations are interpolated with Slerp(). 
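Pose above composes like a rigid transform: a * b applies b first and then a, Transform() rotates and then translates a point, and Inverted()/InverseTransform() map world points back into the pose's local frame. A small usage sketch under the same header assumptions as above:

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;
  const float kPiOver2 = 1.5707963f;

  // Parent frame: one meter forward along -Z, no rotation.
  Posef parent(Quatf(0, 0, 0, 1), Vector3f(0.0f, 0.0f, -1.0f));
  // Child frame relative to the parent: 90 degrees of yaw, half a meter up.
  Posef child(Quatf(Vector3f(0, 1, 0), kPiOver2), Vector3f(0.0f, 0.5f, 0.0f));

  Posef world = parent * child; // apply child first, then parent
  Vector3f p = world.Transform(Vector3f(1.0f, 0.0f, 0.0f)); // roughly (0.0, 0.5, -2.0)
  Vector3f back = world.InverseTransform(p); // back to (1, 0, 0)

  std::printf("p=(%.2f, %.2f, %.2f) back=(%.2f, %.2f, %.2f)\n",
              p.x, p.y, p.z, back.x, back.y, back.z);
  return 0;
}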
+ Pose Lerp(const Pose& b, T s) const { + return Pose(Rotation.Slerp(b.Rotation, s), Translation.Lerp(b.Translation, s)); + } + + // Similar to Lerp above, except faster in case of small rotation differences. See + // Quat::FastSlerp. + Pose FastLerp(const Pose& b, T s) const { + return Pose(Rotation.FastSlerp(b.Rotation, s), Translation.Lerp(b.Translation, s)); + } + + Pose TimeIntegrate(const Vector3& linearVelocity, const Vector3& angularVelocity, T dt) + const { + return Pose( + (Rotation * Quat::FastFromRotationVector(angularVelocity * dt, false)).Normalized(), + Translation + linearVelocity * dt); + } + + Pose TimeIntegrate( + const Vector3& linearVelocity, + const Vector3& linearAcceleration, + const Vector3& angularVelocity, + const Vector3& angularAcceleration, + T dt) const { + return Pose( + Rotation.TimeIntegrate(angularVelocity, angularAcceleration, dt), + Translation + linearVelocity * dt + linearAcceleration * dt * dt * T(0.5)); + } + + Pose Normalized() const { + return Pose(Rotation.Normalized(), Translation); + } + void Normalize() { + Rotation.Normalize(); + } + + bool IsNan() const { + return Translation.IsNan() || Rotation.IsNan(); + } + bool IsFinite() const { + return Translation.IsFinite() && Rotation.IsFinite(); + } +}; + +typedef Pose Posef; +typedef Pose Posed; + +OVR_MATH_STATIC_ASSERT( + (sizeof(Posed) == sizeof(Quatd) + sizeof(Vector3d)), + "sizeof(Posed) failure"); +OVR_MATH_STATIC_ASSERT( + (sizeof(Posef) == sizeof(Quatf) + sizeof(Vector3f)), + "sizeof(Posef) failure"); + +//------------------------------------------------------------------------------------- +// ***** Matrix4 +// +// Matrix4 is a 4x4 matrix used for 3d transformations and projections. +// Translation stored in the last column. +// The matrix is stored in row-major order in memory, meaning that values +// of the first row are stored before the next one. +// +// The arrangement of the matrix is chosen to be in Right-Handed +// coordinate system and counterclockwise rotations when looking down +// the axis +// +// Transformation Order: +// - Transformations are applied from right to left, so the expression +// M1 * M2 * M3 * V means that the vector V is transformed by M3 first, +// followed by M2 and M1. +// +// Coordinate system: Right Handed +// +// Rotations: Counterclockwise when looking down the axis. All angles are in radians. +// +// | sx 01 02 tx | // First column (sx, 10, 20): Axis X basis vector. +// | 10 sy 12 ty | // Second column (01, sy, 21): Axis Y basis vector. +// | 20 21 sz tz | // Third columnt (02, 12, sz): Axis Z basis vector. +// | 30 31 32 33 | +// +// The basis vectors are first three columns. + +template +class Matrix4 { + public: + typedef T ElementType; + static const size_t Dimension = 4; + + T M[4][4]; + + enum NoInitType { NoInit }; + + // Construct with no memory initialization. + Matrix4(NoInitType) {} + + // By default, we construct identity matrix. 
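Pose::Lerp above interpolates translation linearly and orientation with Slerp, so s = 0 returns *this and s = 1 returns b; FastLerp is the cheaper variant when the rotation delta is small. A quick sketch of blending halfway between two poses (same header assumptions):

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;
  const float kPiOver2 = 1.5707963f;

  Posef a(Quatf(0, 0, 0, 1), Vector3f(0.0f, 0.0f, 0.0f));
  Posef b(Quatf(Vector3f(0, 1, 0), kPiOver2), Vector3f(2.0f, 0.0f, 0.0f));

  // Halfway between a and b: about 45 degrees of yaw, one meter along +X.
  Posef mid = a.Lerp(b, 0.5f);

  float yaw = 0.0f, pitch = 0.0f, roll = 0.0f;
  mid.Rotation.GetYawPitchRoll(&yaw, &pitch, &roll);
  std::printf("yaw=%.3f rad, translation=(%.2f, %.2f, %.2f)\n",
              yaw, mid.Translation.x, mid.Translation.y, mid.Translation.z);
  return 0;
}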
+ Matrix4() { + M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1); + M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0); + M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0); + M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0); + } + + Matrix4( + T m11, + T m12, + T m13, + T m14, + T m21, + T m22, + T m23, + T m24, + T m31, + T m32, + T m33, + T m34, + T m41, + T m42, + T m43, + T m44) { + M[0][0] = m11; + M[0][1] = m12; + M[0][2] = m13; + M[0][3] = m14; + M[1][0] = m21; + M[1][1] = m22; + M[1][2] = m23; + M[1][3] = m24; + M[2][0] = m31; + M[2][1] = m32; + M[2][2] = m33; + M[2][3] = m34; + M[3][0] = m41; + M[3][1] = m42; + M[3][2] = m43; + M[3][3] = m44; + } + + Matrix4(T m11, T m12, T m13, T m21, T m22, T m23, T m31, T m32, T m33) { + M[0][0] = m11; + M[0][1] = m12; + M[0][2] = m13; + M[0][3] = T(0); + M[1][0] = m21; + M[1][1] = m22; + M[1][2] = m23; + M[1][3] = T(0); + M[2][0] = m31; + M[2][1] = m32; + M[2][2] = m33; + M[2][3] = T(0); + M[3][0] = T(0); + M[3][1] = T(0); + M[3][2] = T(0); + M[3][3] = T(1); + } + + explicit Matrix4(const Matrix3& m) { + M[0][0] = m.M[0][0]; + M[0][1] = m.M[0][1]; + M[0][2] = m.M[0][2]; + M[0][3] = T(0); + M[1][0] = m.M[1][0]; + M[1][1] = m.M[1][1]; + M[1][2] = m.M[1][2]; + M[1][3] = T(0); + M[2][0] = m.M[2][0]; + M[2][1] = m.M[2][1]; + M[2][2] = m.M[2][2]; + M[2][3] = T(0); + M[3][0] = T(0); + M[3][1] = T(0); + M[3][2] = T(0); + M[3][3] = T(1); + } + + explicit Matrix4(const Quat& q) { + OVR_MATH_ASSERT(q.IsNormalized()); // If this fires, caller has a quat math bug + T ww = q.w * q.w; + T xx = q.x * q.x; + T yy = q.y * q.y; + T zz = q.z * q.z; + + M[0][0] = ww + xx - yy - zz; + M[0][1] = 2 * (q.x * q.y - q.w * q.z); + M[0][2] = 2 * (q.x * q.z + q.w * q.y); + M[0][3] = T(0); + M[1][0] = 2 * (q.x * q.y + q.w * q.z); + M[1][1] = ww - xx + yy - zz; + M[1][2] = 2 * (q.y * q.z - q.w * q.x); + M[1][3] = T(0); + M[2][0] = 2 * (q.x * q.z - q.w * q.y); + M[2][1] = 2 * (q.y * q.z + q.w * q.x); + M[2][2] = ww - xx - yy + zz; + M[2][3] = T(0); + M[3][0] = T(0); + M[3][1] = T(0); + M[3][2] = T(0); + M[3][3] = T(1); + } + + explicit Matrix4(const Pose& p) { + Matrix4 result(p.Rotation); + result.SetTranslation(p.Translation); + *this = result; + } + + // C-interop support + explicit Matrix4(const Matrix4::OtherFloatType>& src) { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] = (T)src.M[i][j]; + } + + // C-interop support. 
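The Matrix4 constructors above taking a Quat or a Pose build the usual rigid-body matrix: the quaternion expands into the upper-left 3x3 block and the translation lands in the last column, matching the row-major, translation-in-last-column layout described earlier. A short check (same header assumptions):

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;
  const float kPiOver2 = 1.5707963f;

  Posef pose(Quatf(Vector3f(0, 0, 1), kPiOver2), // 90 degrees of roll about Z
             Vector3f(1.0f, 2.0f, 3.0f));
  Matrix4f m(pose);

  // The translation sits in the last column: M[0][3], M[1][3], M[2][3].
  Vector3f t = m.GetTranslation();

  // Transforming a point through the matrix matches transforming it through the pose.
  Vector3f viaMatrix = m.Transform(Vector3f(1.0f, 0.0f, 0.0f)); // roughly (1, 3, 3)
  Vector3f viaPose = pose.Transform(Vector3f(1.0f, 0.0f, 0.0f));

  std::printf("t=(%.1f, %.1f, %.1f) matrix=(%.2f, %.2f, %.2f) pose=(%.2f, %.2f, %.2f)\n",
              t.x, t.y, t.z, viaMatrix.x, viaMatrix.y, viaMatrix.z,
              viaPose.x, viaPose.y, viaPose.z);
  return 0;
}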
+ Matrix4(const typename CompatibleTypes>::Type& s) { + OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix4), "sizeof(s) == sizeof(Matrix4)"); + memcpy(M, s.M, sizeof(M)); + } + + operator typename CompatibleTypes>::Type() const { + typename CompatibleTypes>::Type result; + OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix4), "sizeof(result) == sizeof(Matrix4)"); + memcpy(result.M, M, sizeof(M)); + return result; + } + + void ToString(char* dest, size_t destsize) const { + size_t pos = 0; + for (int r = 0; r < 4; r++) { + for (int c = 0; c < 4; c++) { + pos += OVRMath_sprintf(dest + pos, destsize - pos, "%g ", M[r][c]); + } + } + } + + static Matrix4 FromString(const char* src) { + Matrix4 result; + if (src) { + for (int r = 0; r < 4; r++) { + for (int c = 0; c < 4; c++) { + result.M[r][c] = (T)atof(src); + while (*src && *src != ' ') { + src++; + } + while (*src && *src == ' ') { + src++; + } + } + } + } + return result; + } + + static Matrix4 Identity() { + return Matrix4(); + } + + void SetIdentity() { + M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1); + M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0); + M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0); + M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0); + } + + void SetXBasis(const Vector3& v) { + M[0][0] = v.x; + M[1][0] = v.y; + M[2][0] = v.z; + } + Vector3 GetXBasis() const { + return Vector3(M[0][0], M[1][0], M[2][0]); + } + + void SetYBasis(const Vector3& v) { + M[0][1] = v.x; + M[1][1] = v.y; + M[2][1] = v.z; + } + Vector3 GetYBasis() const { + return Vector3(M[0][1], M[1][1], M[2][1]); + } + + void SetZBasis(const Vector3& v) { + M[0][2] = v.x; + M[1][2] = v.y; + M[2][2] = v.z; + } + Vector3 GetZBasis() const { + return Vector3(M[0][2], M[1][2], M[2][2]); + } + + bool operator==(const Matrix4& b) const { + bool isEqual = true; + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + isEqual &= (M[i][j] == b.M[i][j]); + + return isEqual; + } + + Matrix4 operator+(const Matrix4& b) const { + Matrix4 result(*this); + result += b; + return result; + } + + Matrix4& operator+=(const Matrix4& b) { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] += b.M[i][j]; + return *this; + } + + Matrix4 operator-(const Matrix4& b) const { + Matrix4 result(*this); + result -= b; + return result; + } + + Matrix4& operator-=(const Matrix4& b) { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] -= b.M[i][j]; + return *this; + } + + // Multiplies two matrices into destination with minimum copying. 
+ static Matrix4& Multiply(Matrix4* d, const Matrix4& a, const Matrix4& b) { + OVR_MATH_ASSERT((d != &a) && (d != &b)); + int i = 0; + do { + d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0] + + a.M[i][3] * b.M[3][0]; + d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1] + + a.M[i][3] * b.M[3][1]; + d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2] + + a.M[i][3] * b.M[3][2]; + d->M[i][3] = a.M[i][0] * b.M[0][3] + a.M[i][1] * b.M[1][3] + a.M[i][2] * b.M[2][3] + + a.M[i][3] * b.M[3][3]; + } while ((++i) < 4); + + return *d; + } + + Matrix4 operator*(const Matrix4& b) const { + Matrix4 result(Matrix4::NoInit); + Multiply(&result, *this, b); + return result; + } + + Matrix4& operator*=(const Matrix4& b) { + return Multiply(this, Matrix4(*this), b); + } + + Matrix4 operator*(T s) const { + Matrix4 result(*this); + result *= s; + return result; + } + + Matrix4& operator*=(T s) { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] *= s; + return *this; + } + + Matrix4 operator/(T s) const { + Matrix4 result(*this); + result /= s; + return result; + } + + Matrix4& operator/=(T s) { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] /= s; + return *this; + } + + T operator()(int i, int j) const { + return M[i][j]; + } + T& operator()(int i, int j) { + return M[i][j]; + } + + Vector4 operator*(const Vector4& b) const { + return Transform(b); + } + + Vector3 Transform(const Vector3& v) const { + const T rcpW = T(1) / (M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3]); + return Vector3( + (M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3]) * rcpW, + (M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3]) * rcpW, + (M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]) * rcpW); + } + + Vector4 Transform(const Vector4& v) const { + return Vector4( + M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3] * v.w, + M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3] * v.w, + M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3] * v.w, + M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3] * v.w); + } + + Matrix4 Transposed() const { + return Matrix4( + M[0][0], + M[1][0], + M[2][0], + M[3][0], + M[0][1], + M[1][1], + M[2][1], + M[3][1], + M[0][2], + M[1][2], + M[2][2], + M[3][2], + M[0][3], + M[1][3], + M[2][3], + M[3][3]); + } + + void Transpose() { + *this = Transposed(); + } + + T SubDet(const size_t* rows, const size_t* cols) const { + return M[rows[0]][cols[0]] * + (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) - + M[rows[0]][cols[1]] * + (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) + + M[rows[0]][cols[2]] * + (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]); + } + + T Cofactor(size_t I, size_t J) const { + const size_t indices[4][3] = {{1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2}}; + return ((I + J) & 1) ? 
-SubDet(indices[I], indices[J]) : SubDet(indices[I], indices[J]); + } + + T Determinant() const { + return M[0][0] * Cofactor(0, 0) + M[0][1] * Cofactor(0, 1) + M[0][2] * Cofactor(0, 2) + + M[0][3] * Cofactor(0, 3); + } + + Matrix4 Adjugated() const { + return Matrix4( + Cofactor(0, 0), + Cofactor(1, 0), + Cofactor(2, 0), + Cofactor(3, 0), + Cofactor(0, 1), + Cofactor(1, 1), + Cofactor(2, 1), + Cofactor(3, 1), + Cofactor(0, 2), + Cofactor(1, 2), + Cofactor(2, 2), + Cofactor(3, 2), + Cofactor(0, 3), + Cofactor(1, 3), + Cofactor(2, 3), + Cofactor(3, 3)); + } + + Matrix4 Inverted() const { + T det = Determinant(); + OVR_MATH_ASSERT(det != 0); + return Adjugated() * (T(1) / det); + } + + void Invert() { + *this = Inverted(); + } + + // This is more efficient than general inverse, but ONLY works + // correctly if it is a homogeneous transform matrix (rot + trans) + Matrix4 InvertedHomogeneousTransform() const { + // Make the inverse rotation matrix + Matrix4 rinv = this->Transposed(); + rinv.M[3][0] = rinv.M[3][1] = rinv.M[3][2] = T(0); + // Make the inverse translation matrix + Vector3 tvinv(-M[0][3], -M[1][3], -M[2][3]); + Matrix4 tinv = Matrix4::Translation(tvinv); + return rinv * tinv; // "untranslate", then "unrotate" + } + + // This is more efficient than general inverse, but ONLY works + // correctly if it is a homogeneous transform matrix (rot + trans) + void InvertHomogeneousTransform() { + *this = InvertedHomogeneousTransform(); + } + + // Matrix to Euler Angles conversion + // a,b,c, are the YawPitchRoll angles to be returned + // rotation a around axis A1 + // is followed by rotation b around axis A2 + // is followed by rotation c around axis A3 + // rotations are CCW or CW (D) in LH or RH coordinate system (S) + template + void ToEulerAngles(T* a, T* b, T* c) const { + OVR_MATH_STATIC_ASSERT( + (A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)"); + + T psign = T(-1); + if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) // Determine whether even permutation + psign = T(1); + + T pm = psign * M[A1][A3]; + T singularityRadius = Math::SingularityRadius(); + if (pm < T(-1) + singularityRadius) { // South pole singularity + *a = T(0); + *b = -S * D * ((T)MATH_DOUBLE_PIOVER2); + *c = S * D * atan2(psign * M[A2][A1], M[A2][A2]); + } else if (pm > T(1) - singularityRadius) { // North pole singularity + *a = T(0); + *b = S * D * ((T)MATH_DOUBLE_PIOVER2); + *c = S * D * atan2(psign * M[A2][A1], M[A2][A2]); + } else { // Normal case (nonsingular) + *a = S * D * atan2(-psign * M[A2][A3], M[A3][A3]); + *b = S * D * asin(pm); + *c = S * D * atan2(-psign * M[A1][A2], M[A1][A1]); + } + } + + // Matrix to Euler Angles conversion + // a,b,c, are the YawPitchRoll angles to be returned + // rotation a around axis A1 + // is followed by rotation b around axis A2 + // is followed by rotation c around axis A1 + // rotations are CCW or CW (D) in LH or RH coordinate system (S) + template + void ToEulerAnglesABA(T* a, T* b, T* c) const { + OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2"); + + // Determine the axis that was not supplied + int m = 3 - A1 - A2; + + T psign = T(-1); + if ((A1 + 1) % 3 == A2) // Determine whether even permutation + psign = T(1); + + T c2 = M[A1][A1]; + T singularityRadius = Math::SingularityRadius(); + if (c2 < T(-1) + singularityRadius) { // South pole singularity + *a = T(0); + *b = S * D * ((T)MATH_DOUBLE_PI); + *c = S * D * atan2(-psign * M[A2][m], M[A2][A2]); + } else if (c2 > T(1) - singularityRadius) { // North pole singularity + *a = T(0); + *b = 
T(0); + *c = S * D * atan2(-psign * M[A2][m], M[A2][A2]); + } else { // Normal case (nonsingular) + *a = S * D * atan2(M[A2][A1], -psign * M[m][A1]); + *b = S * D * acos(c2); + *c = S * D * atan2(M[A1][A2], psign * M[A1][m]); + } + } + + // Creates a matrix that converts the vertices from one coordinate system + // to another. + static Matrix4 AxisConversion(const WorldAxes& to, const WorldAxes& from) { + // Holds axis values from the 'to' structure + int toArray[3] = {to.XAxis, to.YAxis, to.ZAxis}; + + // The inverse of the toArray + int inv[4]; + inv[0] = inv[abs(to.XAxis)] = 0; + inv[abs(to.YAxis)] = 1; + inv[abs(to.ZAxis)] = 2; + + Matrix4 m(0, 0, 0, 0, 0, 0, 0, 0, 0); + + // Only three values in the matrix need to be changed to 1 or -1. + m.M[inv[abs(from.XAxis)]][0] = T(from.XAxis / toArray[inv[abs(from.XAxis)]]); + m.M[inv[abs(from.YAxis)]][1] = T(from.YAxis / toArray[inv[abs(from.YAxis)]]); + m.M[inv[abs(from.ZAxis)]][2] = T(from.ZAxis / toArray[inv[abs(from.ZAxis)]]); + return m; + } + + // Creates a matrix for translation by vector + static Matrix4 Translation(const Vector3& v) { + Matrix4 t; + t.M[0][3] = v.x; + t.M[1][3] = v.y; + t.M[2][3] = v.z; + return t; + } + + // Creates a matrix for translation by vector + static Matrix4 Translation(T x, T y, T z = T(0)) { + Matrix4 t; + t.M[0][3] = x; + t.M[1][3] = y; + t.M[2][3] = z; + return t; + } + + // Sets the translation part + void SetTranslation(const Vector3& v) { + M[0][3] = v.x; + M[1][3] = v.y; + M[2][3] = v.z; + } + + Vector3 GetTranslation() const { + return Vector3(M[0][3], M[1][3], M[2][3]); + } + + // Creates a matrix for scaling by vector + static Matrix4 Scaling(const Vector3& v) { + Matrix4 t; + t.M[0][0] = v.x; + t.M[1][1] = v.y; + t.M[2][2] = v.z; + return t; + } + + // Creates a matrix for scaling by vector + static Matrix4 Scaling(T x, T y, T z) { + Matrix4 t; + t.M[0][0] = x; + t.M[1][1] = y; + t.M[2][2] = z; + return t; + } + + // Creates a matrix for scaling by constant + static Matrix4 Scaling(T s) { + Matrix4 t; + t.M[0][0] = s; + t.M[1][1] = s; + t.M[2][2] = s; + return t; + } + + // Simple L1 distance in R^12 + T Distance(const Matrix4& m2) const { + T d = fabs(M[0][0] - m2.M[0][0]) + fabs(M[0][1] - m2.M[0][1]); + d += fabs(M[0][2] - m2.M[0][2]) + fabs(M[0][3] - m2.M[0][3]); + d += fabs(M[1][0] - m2.M[1][0]) + fabs(M[1][1] - m2.M[1][1]); + d += fabs(M[1][2] - m2.M[1][2]) + fabs(M[1][3] - m2.M[1][3]); + d += fabs(M[2][0] - m2.M[2][0]) + fabs(M[2][1] - m2.M[2][1]); + d += fabs(M[2][2] - m2.M[2][2]) + fabs(M[2][3] - m2.M[2][3]); + d += fabs(M[3][0] - m2.M[3][0]) + fabs(M[3][1] - m2.M[3][1]); + d += fabs(M[3][2] - m2.M[3][2]) + fabs(M[3][3] - m2.M[3][3]); + return d; + } + + // Creates a rotation matrix rotating around the X axis by 'angle' radians. + // Just for quick testing. Not for final API. Need to remove case. + static Matrix4 RotationAxis(Axis A, T angle, RotateDirection d, HandedSystem s) { + T sina = s * d * sin(angle); + T cosa = cos(angle); + + switch (A) { + case Axis_X: + return Matrix4(1, 0, 0, 0, cosa, -sina, 0, sina, cosa); + case Axis_Y: + return Matrix4(cosa, 0, sina, 0, 1, 0, -sina, 0, cosa); + case Axis_Z: + return Matrix4(cosa, -sina, 0, sina, cosa, 0, 0, 0, 1); + default: + return Matrix4(); + } + } + + // Creates a rotation matrix rotating around the X axis by 'angle' radians. + // Rotation direction is depends on the coordinate system: + // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW), + // while looking in the negative axis direction. 
This is the + // same as looking down from positive axis values towards origin. + // LHS: Positive angle values rotate clock-wise (CW), while looking in the + // negative axis direction. + static Matrix4 RotationX(T angle) { + T sina = sin(angle); + T cosa = cos(angle); + return Matrix4(1, 0, 0, 0, cosa, -sina, 0, sina, cosa); + } + + // Creates a rotation matrix rotating around the Y axis by 'angle' radians. + // Rotation direction is depends on the coordinate system: + // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW), + // while looking in the negative axis direction. This is the + // same as looking down from positive axis values towards origin. + // LHS: Positive angle values rotate clock-wise (CW), while looking in the + // negative axis direction. + static Matrix4 RotationY(T angle) { + T sina = (T)sin(angle); + T cosa = (T)cos(angle); + return Matrix4(cosa, 0, sina, 0, 1, 0, -sina, 0, cosa); + } + + // Creates a rotation matrix rotating around the Z axis by 'angle' radians. + // Rotation direction is depends on the coordinate system: + // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW), + // while looking in the negative axis direction. This is the + // same as looking down from positive axis values towards origin. + // LHS: Positive angle values rotate clock-wise (CW), while looking in the + // negative axis direction. + static Matrix4 RotationZ(T angle) { + T sina = sin(angle); + T cosa = cos(angle); + return Matrix4(cosa, -sina, 0, sina, cosa, 0, 0, 0, 1); + } + + // LookAtRH creates a View transformation matrix for right-handed coordinate system. + // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up' + // specifying the up vector. The resulting matrix should be used with PerspectiveRH + // projection. + static Matrix4 LookAtRH(const Vector3& eye, const Vector3& at, const Vector3& up) { + Vector3 z = (eye - at).Normalized(); // Forward + Vector3 x = up.Cross(z).Normalized(); // Right + Vector3 y = z.Cross(x); + + Matrix4 m( + x.x, + x.y, + x.z, + -(x.Dot(eye)), + y.x, + y.y, + y.z, + -(y.Dot(eye)), + z.x, + z.y, + z.z, + -(z.Dot(eye)), + 0, + 0, + 0, + 1); + return m; + } + + // LookAtLH creates a View transformation matrix for left-handed coordinate system. + // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up' + // specifying the up vector. + static Matrix4 LookAtLH(const Vector3& eye, const Vector3& at, const Vector3& up) { + Vector3 z = (at - eye).Normalized(); // Forward + Vector3 x = up.Cross(z).Normalized(); // Right + Vector3 y = z.Cross(x); + + Matrix4 m( + x.x, + x.y, + x.z, + -(x.Dot(eye)), + y.x, + y.y, + y.z, + -(y.Dot(eye)), + z.x, + z.y, + z.z, + -(z.Dot(eye)), + 0, + 0, + 0, + 1); + return m; + } + + // PerspectiveRH creates a right-handed perspective projection matrix that can be + // used with the Oculus sample renderer. + // yfov - Specifies vertical field of view in radians. + // aspect - Screen aspect ration, which is usually width/height for square pixels. + // Note that xfov = yfov * aspect. + // znear - Absolute value of near Z clipping clipping range. + // zfar - Absolute value of far Z clipping clipping range (larger then near). + // Even though RHS usually looks in the direction of negative Z, positive values + // are expected for znear and zfar. 
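LookAtRH above puts the right, up and back basis vectors in the rows of the rotation block and -dot(basis, eye) in the last column, so the eye position maps to the view-space origin and the viewing direction becomes -Z. Sketch (same header assumptions):

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;

  Vector3f eye(0.0f, 1.6f, 2.0f); // camera 1.6m up, 2m back
  Vector3f at(0.0f, 1.6f, 0.0f);  // looking toward -Z
  Vector3f up(0.0f, 1.0f, 0.0f);

  Matrix4f view = Matrix4f::LookAtRH(eye, at, up);

  // The eye lands at the view-space origin...
  Vector3f e = view.Transform(eye); // roughly (0, 0, 0)
  // ...and the look-at target ends up straight ahead on the -Z axis.
  Vector3f a = view.Transform(at); // roughly (0, 0, -2)

  std::printf("eye -> (%.2f, %.2f, %.2f), at -> (%.2f, %.2f, %.2f)\n",
              e.x, e.y, e.z, a.x, a.y, a.z);
  return 0;
}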
+ static Matrix4 PerspectiveRH(T yfov, T aspect, T znear, T zfar) { + Matrix4 m; + T tanHalfFov = (T)tan(yfov * T(0.5)); + + m.M[0][0] = T(1) / (aspect * tanHalfFov); + m.M[1][1] = T(1) / tanHalfFov; + m.M[2][2] = zfar / (znear - zfar); + m.M[3][2] = T(-1); + m.M[2][3] = (zfar * znear) / (znear - zfar); + m.M[3][3] = T(0); + + // Note: Post-projection matrix result assumes Left-Handed coordinate system, + // with Y up, X right and Z forward. This supports positive z-buffer values. + // This is the case even for RHS coordinate input. + return m; + } + + // PerspectiveLH creates a left-handed perspective projection matrix that can be + // used with the Oculus sample renderer. + // yfov - Specifies vertical field of view in radians. + // aspect - Screen aspect ration, which is usually width/height for square pixels. + // Note that xfov = yfov * aspect. + // znear - Absolute value of near Z clipping clipping range. + // zfar - Absolute value of far Z clipping clipping range (larger then near). + static Matrix4 PerspectiveLH(T yfov, T aspect, T znear, T zfar) { + Matrix4 m; + T tanHalfFov = (T)tan(yfov * T(0.5)); + + m.M[0][0] = T(1) / (aspect * tanHalfFov); + m.M[1][1] = T(1) / tanHalfFov; + // m.M[2][2] = zfar / (znear - zfar); + m.M[2][2] = zfar / (zfar - znear); + m.M[3][2] = T(-1); + m.M[2][3] = (zfar * znear) / (znear - zfar); + m.M[3][3] = T(0); + + // Note: Post-projection matrix result assumes Left-Handed coordinate system, + // with Y up, X right and Z forward. This supports positive z-buffer values. + // This is the case even for RHS coordinate input. + return m; + } + + static Matrix4 Ortho2D(T w, T h) { + Matrix4 m; + m.M[0][0] = T(2.0) / w; + m.M[1][1] = T(-2.0) / h; + m.M[0][3] = T(-1.0); + m.M[1][3] = T(1.0); + m.M[2][2] = T(0); + return m; + } +}; + +typedef Matrix4 Matrix4f; +typedef Matrix4 Matrix4d; + +//------------------------------------------------------------------------------------- +// ***** Matrix3 +// +// Matrix3 is a 3x3 matrix used for representing a rotation matrix. +// The matrix is stored in row-major order in memory, meaning that values +// of the first row are stored before the next one. +// +// The arrangement of the matrix is chosen to be in Right-Handed +// coordinate system and counterclockwise rotations when looking down +// the axis +// +// Transformation Order: +// - Transformations are applied from right to left, so the expression +// M1 * M2 * M3 * V means that the vector V is transformed by M3 first, +// followed by M2 and M1. +// +// Coordinate system: Right Handed +// +// Rotations: Counterclockwise when looking down the axis. All angles are in radians. + +template +class Matrix3 { + public: + typedef T ElementType; + static const size_t Dimension = 3; + + T M[3][3]; + + enum NoInitType { NoInit }; + + // Construct with no memory initialization. + Matrix3(NoInitType) {} + + // By default, we construct identity matrix. 
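With the PerspectiveRH layout above (M[2][2] = zfar / (znear - zfar), M[2][3] = zfar * znear / (znear - zfar), M[3][2] = -1), a right-handed view-space point at z = -znear projects to depth 0 and one at z = -zfar to depth 1, i.e. a D3D-style 0..1 clip range after the divide that Transform() performs. A quick numeric check (same header assumptions):

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;

  const float yfov = 1.0f; // ~57 degrees vertical FOV, in radians
  const float aspect = 16.0f / 9.0f;
  const float znear = 0.1f, zfar = 100.0f;

  Matrix4f proj = Matrix4f::PerspectiveRH(yfov, aspect, znear, zfar);

  // Points on the optical axis at the near and far planes (RH looks down -Z).
  Vector3f nearPt = proj.Transform(Vector3f(0.0f, 0.0f, -znear));
  Vector3f farPt = proj.Transform(Vector3f(0.0f, 0.0f, -zfar));

  // Expect depth 0 at the near plane and depth 1 at the far plane.
  std::printf("near depth = %.4f, far depth = %.4f\n", nearPt.z, farPt.z);
  return 0;
}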
+ Matrix3() { + M[0][0] = M[1][1] = M[2][2] = T(1); + M[0][1] = M[1][0] = M[2][0] = T(0); + M[0][2] = M[1][2] = M[2][1] = T(0); + } + + Matrix3(T m11, T m12, T m13, T m21, T m22, T m23, T m31, T m32, T m33) { + M[0][0] = m11; + M[0][1] = m12; + M[0][2] = m13; + M[1][0] = m21; + M[1][1] = m22; + M[1][2] = m23; + M[2][0] = m31; + M[2][1] = m32; + M[2][2] = m33; + } + + // Construction from X, Y, Z basis vectors + Matrix3(const Vector3& xBasis, const Vector3& yBasis, const Vector3& zBasis) { + M[0][0] = xBasis.x; + M[0][1] = yBasis.x; + M[0][2] = zBasis.x; + M[1][0] = xBasis.y; + M[1][1] = yBasis.y; + M[1][2] = zBasis.y; + M[2][0] = xBasis.z; + M[2][1] = yBasis.z; + M[2][2] = zBasis.z; + } + + explicit Matrix3(const Quat& q) { + OVR_MATH_ASSERT(q.IsNormalized()); // If this fires, caller has a quat math bug + const T tx = q.x + q.x, ty = q.y + q.y, tz = q.z + q.z; + const T twx = q.w * tx, twy = q.w * ty, twz = q.w * tz; + const T txx = q.x * tx, txy = q.x * ty, txz = q.x * tz; + const T tyy = q.y * ty, tyz = q.y * tz, tzz = q.z * tz; + M[0][0] = T(1) - (tyy + tzz); + M[0][1] = txy - twz; + M[0][2] = txz + twy; + M[1][0] = txy + twz; + M[1][1] = T(1) - (txx + tzz); + M[1][2] = tyz - twx; + M[2][0] = txz - twy; + M[2][1] = tyz + twx; + M[2][2] = T(1) - (txx + tyy); + } + + inline explicit Matrix3(T s) { + M[0][0] = M[1][1] = M[2][2] = s; + M[0][1] = M[0][2] = M[1][0] = M[1][2] = M[2][0] = M[2][1] = T(0); + } + + Matrix3(T m11, T m22, T m33) { + M[0][0] = m11; + M[0][1] = T(0); + M[0][2] = T(0); + M[1][0] = T(0); + M[1][1] = m22; + M[1][2] = T(0); + M[2][0] = T(0); + M[2][1] = T(0); + M[2][2] = m33; + } + + explicit Matrix3(const Matrix3::OtherFloatType>& src) { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] = (T)src.M[i][j]; + } + + // C-interop support. 
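The Matrix3 constructor above taking a Quat expands the quaternion with the usual doubled products (tx = 2x, twx = 2wx, ...), producing the rotation matrix whose columns are the rotated basis vectors. The same expansion for a plain (x, y, z, w) float quaternion, as a standalone sketch (QuatToMatrix is a hypothetical helper):

#include <cmath>
#include <cstdio>

// Same expansion as the Quat -> Matrix3 constructor above, writing the
// rotation into a row-major 3x3 array.
static void QuatToMatrix(float x, float y, float z, float w, float m[3][3]) {
  const float tx = x + x, ty = y + y, tz = z + z;
  const float twx = w * tx, twy = w * ty, twz = w * tz;
  const float txx = x * tx, txy = x * ty, txz = x * tz;
  const float tyy = y * ty, tyz = y * tz, tzz = z * tz;
  m[0][0] = 1.0f - (tyy + tzz); m[0][1] = txy - twz;          m[0][2] = txz + twy;
  m[1][0] = txy + twz;          m[1][1] = 1.0f - (txx + tzz); m[1][2] = tyz - twx;
  m[2][0] = txz - twy;          m[2][1] = tyz + twx;          m[2][2] = 1.0f - (txx + tyy);
}

int main() {
  // 90 degree rotation about Z: (x, y, z, w) = (0, 0, sin45, cos45).
  const float s = std::sqrt(0.5f);
  float m[3][3];
  QuatToMatrix(0.0f, 0.0f, s, s, m);
  // Expect approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]].
  for (int r = 0; r < 3; r++)
    std::printf("%6.3f %6.3f %6.3f\n", m[r][0], m[r][1], m[r][2]);
  return 0;
}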
+ Matrix3(const typename CompatibleTypes>::Type& s) { + OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix3), "sizeof(s) == sizeof(Matrix3)"); + memcpy(M, s.M, sizeof(M)); + } + + operator const typename CompatibleTypes>::Type() const { + typename CompatibleTypes>::Type result; + OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix3), "sizeof(result) == sizeof(Matrix3)"); + memcpy(result.M, M, sizeof(M)); + return result; + } + + T operator()(int i, int j) const { + return M[i][j]; + } + T& operator()(int i, int j) { + return M[i][j]; + } + + void ToString(char* dest, size_t destsize) const { + size_t pos = 0; + for (int r = 0; r < 3; r++) { + for (int c = 0; c < 3; c++) + pos += OVRMath_sprintf(dest + pos, destsize - pos, "%g ", M[r][c]); + } + } + + static Matrix3 FromString(const char* src) { + Matrix3 result; + if (src) { + for (int r = 0; r < 3; r++) { + for (int c = 0; c < 3; c++) { + result.M[r][c] = (T)atof(src); + while (*src && *src != ' ') + src++; + while (*src && *src == ' ') + src++; + } + } + } + return result; + } + + static Matrix3 Identity() { + return Matrix3(); + } + + void SetIdentity() { + M[0][0] = M[1][1] = M[2][2] = T(1); + M[0][1] = M[1][0] = M[2][0] = T(0); + M[0][2] = M[1][2] = M[2][1] = T(0); + } + + static Matrix3 Diagonal(T m00, T m11, T m22) { + return Matrix3(m00, 0, 0, 0, m11, 0, 0, 0, m22); + } + static Matrix3 Diagonal(const Vector3& v) { + return Diagonal(v.x, v.y, v.z); + } + + T Trace() const { + return M[0][0] + M[1][1] + M[2][2]; + } + + bool operator==(const Matrix3& b) const { + bool isEqual = true; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) + isEqual &= (M[i][j] == b.M[i][j]); + } + + return isEqual; + } + + Matrix3 operator+(const Matrix3& b) const { + Matrix3 result(*this); + result += b; + return result; + } + + Matrix3& operator+=(const Matrix3& b) { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] += b.M[i][j]; + return *this; + } + + void operator=(const Matrix3& b) { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] = b.M[i][j]; + } + + Matrix3 operator-(const Matrix3& b) const { + Matrix3 result(*this); + result -= b; + return result; + } + + Matrix3& operator-=(const Matrix3& b) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) + M[i][j] -= b.M[i][j]; + } + + return *this; + } + + // Multiplies two matrices into destination with minimum copying. 
+ static Matrix3& Multiply(Matrix3* d, const Matrix3& a, const Matrix3& b) { + OVR_MATH_ASSERT((d != &a) && (d != &b)); + int i = 0; + do { + d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0]; + d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1]; + d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2]; + } while ((++i) < 3); + + return *d; + } + + Matrix3 operator*(const Matrix3& b) const { + Matrix3 result(Matrix3::NoInit); + Multiply(&result, *this, b); + return result; + } + + Matrix3& operator*=(const Matrix3& b) { + return Multiply(this, Matrix3(*this), b); + } + + Matrix3 operator*(T s) const { + Matrix3 result(*this); + result *= s; + return result; + } + + Matrix3& operator*=(T s) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) + M[i][j] *= s; + } + + return *this; + } + + Vector3 operator*(const Vector3& b) const { + Vector3 result; + result.x = M[0][0] * b.x + M[0][1] * b.y + M[0][2] * b.z; + result.y = M[1][0] * b.x + M[1][1] * b.y + M[1][2] * b.z; + result.z = M[2][0] * b.x + M[2][1] * b.y + M[2][2] * b.z; + + return result; + } + + Matrix3 operator/(T s) const { + Matrix3 result(*this); + result /= s; + return result; + } + + Matrix3& operator/=(T s) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) + M[i][j] /= s; + } + + return *this; + } + + Vector2 Transform(const Vector2& v) const { + const T rcpZ = T(1) / (M[2][0] * v.x + M[2][1] * v.y + M[2][2]); + return Vector2( + (M[0][0] * v.x + M[0][1] * v.y + M[0][2]) * rcpZ, + (M[1][0] * v.x + M[1][1] * v.y + M[1][2]) * rcpZ); + } + + Vector3 Transform(const Vector3& v) const { + return Vector3( + M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z, + M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z, + M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z); + } + + Matrix3 Transposed() const { + return Matrix3(M[0][0], M[1][0], M[2][0], M[0][1], M[1][1], M[2][1], M[0][2], M[1][2], M[2][2]); + } + + void Transpose() { + *this = Transposed(); + } + + T SubDet(const size_t* rows, const size_t* cols) const { + return M[rows[0]][cols[0]] * + (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) - + M[rows[0]][cols[1]] * + (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) + + M[rows[0]][cols[2]] * + (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]); + } + + // M += a*b.t() + inline void Rank1Add(const Vector3& a, const Vector3& b) { + M[0][0] += a.x * b.x; + M[0][1] += a.x * b.y; + M[0][2] += a.x * b.z; + M[1][0] += a.y * b.x; + M[1][1] += a.y * b.y; + M[1][2] += a.y * b.z; + M[2][0] += a.z * b.x; + M[2][1] += a.z * b.y; + M[2][2] += a.z * b.z; + } + + // M -= a*b.t() + inline void Rank1Sub(const Vector3& a, const Vector3& b) { + M[0][0] -= a.x * b.x; + M[0][1] -= a.x * b.y; + M[0][2] -= a.x * b.z; + M[1][0] -= a.y * b.x; + M[1][1] -= a.y * b.y; + M[1][2] -= a.y * b.z; + M[2][0] -= a.z * b.x; + M[2][1] -= a.z * b.y; + M[2][2] -= a.z * b.z; + } + + inline Vector3 Col(int c) const { + return Vector3(M[0][c], M[1][c], M[2][c]); + } + + inline Vector3 Row(int r) const { + return Vector3(M[r][0], M[r][1], M[r][2]); + } + + inline Vector3 GetColumn(int c) const { + return Vector3(M[0][c], M[1][c], M[2][c]); + } + + inline Vector3 GetRow(int r) const { + return Vector3(M[r][0], M[r][1], M[r][2]); + } + + inline void SetColumn(int c, const Vector3& v) { + M[0][c] = v.x; + M[1][c] = v.y; + M[2][c] = v.z; + } + + inline void 
SetRow(int r, const Vector3& v) { + M[r][0] = v.x; + M[r][1] = v.y; + M[r][2] = v.z; + } + + inline T Determinant() const { + const Matrix3& m = *this; + T d; + + d = m.M[0][0] * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]); + d -= m.M[0][1] * (m.M[1][0] * m.M[2][2] - m.M[1][2] * m.M[2][0]); + d += m.M[0][2] * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]); + + return d; + } + + inline Matrix3 Inverse() const { + Matrix3 a; + const Matrix3& m = *this; + T d = Determinant(); + + OVR_MATH_ASSERT(d != 0); + T s = T(1) / d; + + a.M[0][0] = s * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]); + a.M[1][0] = s * (m.M[1][2] * m.M[2][0] - m.M[1][0] * m.M[2][2]); + a.M[2][0] = s * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]); + + a.M[0][1] = s * (m.M[0][2] * m.M[2][1] - m.M[0][1] * m.M[2][2]); + a.M[1][1] = s * (m.M[0][0] * m.M[2][2] - m.M[0][2] * m.M[2][0]); + a.M[2][1] = s * (m.M[0][1] * m.M[2][0] - m.M[0][0] * m.M[2][1]); + + a.M[0][2] = s * (m.M[0][1] * m.M[1][2] - m.M[0][2] * m.M[1][1]); + a.M[1][2] = s * (m.M[0][2] * m.M[1][0] - m.M[0][0] * m.M[1][2]); + a.M[2][2] = s * (m.M[0][0] * m.M[1][1] - m.M[0][1] * m.M[1][0]); + + return a; + } + + // Outer Product of two column vectors: a * b.Transpose() + static Matrix3 OuterProduct(const Vector3& a, const Vector3& b) { + return Matrix3( + a.x * b.x, + a.x * b.y, + a.x * b.z, + a.y * b.x, + a.y * b.y, + a.y * b.z, + a.z * b.x, + a.z * b.y, + a.z * b.z); + } + + // Vector cross product as a premultiply matrix: + // L.Cross(R) = LeftCrossAsMatrix(L) * R + static Matrix3 LeftCrossAsMatrix(const Vector3& L) { + return Matrix3(T(0), -L.z, +L.y, +L.z, T(0), -L.x, -L.y, +L.x, T(0)); + } + + // Vector cross product as a premultiply matrix: + // L.Cross(R) = RightCrossAsMatrix(R) * L + static Matrix3 RightCrossAsMatrix(const Vector3& R) { + return Matrix3(T(0), +R.z, -R.y, -R.z, T(0), +R.x, +R.y, -R.x, T(0)); + } + + // Angle in radians of a rotation matrix + // Uses identity trace(a) = 2*cos(theta) + 1 + T Angle() const { + return Acos((Trace() - T(1)) * T(0.5)); + } + + // Angle in radians between two rotation matrices + T Angle(const Matrix3& b) const { + // Compute trace of (this->Transposed() * b) + // This works out to sum of products of elements. + T trace = T(0); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + trace += M[i][j] * b.M[i][j]; + } + } + return Acos((trace - T(1)) * T(0.5)); + } +}; + +typedef Matrix3 Matrix3f; +typedef Matrix3 Matrix3d; + +//------------------------------------------------------------------------------------- +// ***** Matrix2 + +template +class Matrix2 { + public: + typedef T ElementType; + static const size_t Dimension = 2; + + T M[2][2]; + + enum NoInitType { NoInit }; + + // Construct with no memory initialization. + Matrix2(NoInitType) {} + + // By default, we construct identity matrix. 
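LeftCrossAsMatrix above is the skew-symmetric "hat" operator, so L.Cross(R) equals LeftCrossAsMatrix(L) * R; that is convenient whenever a cross product has to be folded into a chain of matrix products. Quick check (same header assumptions):

#include <cstdio>
#include "Extras/OVR_Math.h" // assumed include path

int main() {
  using namespace OVR;

  Vector3f L(1.0f, 2.0f, 3.0f);
  Vector3f R(4.0f, 5.0f, 6.0f);

  Vector3f direct = L.Cross(R);
  Vector3f viaMatrix = Matrix3f::LeftCrossAsMatrix(L) * R;

  // Both should print (-3, 6, -3).
  std::printf("direct=(%.1f, %.1f, %.1f) viaMatrix=(%.1f, %.1f, %.1f)\n",
              direct.x, direct.y, direct.z,
              viaMatrix.x, viaMatrix.y, viaMatrix.z);
  return 0;
}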
+ Matrix2() { + M[0][0] = M[1][1] = T(1); + M[0][1] = M[1][0] = T(0); + } + + Matrix2(T m11, T m12, T m21, T m22) { + M[0][0] = m11; + M[0][1] = m12; + M[1][0] = m21; + M[1][1] = m22; + } + + // Construction from X, Y basis vectors + Matrix2(const Vector2& xBasis, const Vector2& yBasis) { + M[0][0] = xBasis.x; + M[0][1] = yBasis.x; + M[1][0] = xBasis.y; + M[1][1] = yBasis.y; + } + + explicit Matrix2(T s) { + M[0][0] = M[1][1] = s; + M[0][1] = M[1][0] = T(0); + } + + Matrix2(T m11, T m22) { + M[0][0] = m11; + M[0][1] = T(0); + M[1][0] = T(0); + M[1][1] = m22; + } + + explicit Matrix2(const Matrix2::OtherFloatType>& src) { + M[0][0] = T(src.M[0][0]); + M[0][1] = T(src.M[0][1]); + M[1][0] = T(src.M[1][0]); + M[1][1] = T(src.M[1][1]); + } + + // C-interop support + Matrix2(const typename CompatibleTypes>::Type& s) { + OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix2), "sizeof(s) == sizeof(Matrix2)"); + memcpy(M, s.M, sizeof(M)); + } + + operator const typename CompatibleTypes>::Type() const { + typename CompatibleTypes>::Type result; + OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix2), "sizeof(result) == sizeof(Matrix2)"); + memcpy(result.M, M, sizeof(M)); + return result; + } + + T operator()(int i, int j) const { + return M[i][j]; + } + T& operator()(int i, int j) { + return M[i][j]; + } + const T* operator[](int i) const { + return M[i]; + } + T* operator[](int i) { + return M[i]; + } + + static Matrix2 Identity() { + return Matrix2(); + } + + void SetIdentity() { + M[0][0] = M[1][1] = T(1); + M[0][1] = M[1][0] = T(0); + } + + static Matrix2 Diagonal(T m00, T m11) { + return Matrix2(m00, m11); + } + static Matrix2 Diagonal(const Vector2& v) { + return Matrix2(v.x, v.y); + } + + T Trace() const { + return M[0][0] + M[1][1]; + } + + bool operator==(const Matrix2& b) const { + return M[0][0] == b.M[0][0] && M[0][1] == b.M[0][1] && M[1][0] == b.M[1][0] && + M[1][1] == b.M[1][1]; + } + + Matrix2 operator+(const Matrix2& b) const { + return Matrix2( + M[0][0] + b.M[0][0], M[0][1] + b.M[0][1], M[1][0] + b.M[1][0], M[1][1] + b.M[1][1]); + } + + Matrix2& operator+=(const Matrix2& b) { + M[0][0] += b.M[0][0]; + M[0][1] += b.M[0][1]; + M[1][0] += b.M[1][0]; + M[1][1] += b.M[1][1]; + return *this; + } + + void operator=(const Matrix2& b) { + M[0][0] = b.M[0][0]; + M[0][1] = b.M[0][1]; + M[1][0] = b.M[1][0]; + M[1][1] = b.M[1][1]; + } + + Matrix2 operator-(const Matrix2& b) const { + return Matrix2( + M[0][0] - b.M[0][0], M[0][1] - b.M[0][1], M[1][0] - b.M[1][0], M[1][1] - b.M[1][1]); + } + + Matrix2& operator-=(const Matrix2& b) { + M[0][0] -= b.M[0][0]; + M[0][1] -= b.M[0][1]; + M[1][0] -= b.M[1][0]; + M[1][1] -= b.M[1][1]; + return *this; + } + + Matrix2 operator*(const Matrix2& b) const { + return Matrix2( + M[0][0] * b.M[0][0] + M[0][1] * b.M[1][0], + M[0][0] * b.M[0][1] + M[0][1] * b.M[1][1], + M[1][0] * b.M[0][0] + M[1][1] * b.M[1][0], + M[1][0] * b.M[0][1] + M[1][1] * b.M[1][1]); + } + + Matrix2& operator*=(const Matrix2& b) { + *this = *this * b; + return *this; + } + + Matrix2 operator*(T s) const { + return Matrix2(M[0][0] * s, M[0][1] * s, M[1][0] * s, M[1][1] * s); + } + + Matrix2& operator*=(T s) { + M[0][0] *= s; + M[0][1] *= s; + M[1][0] *= s; + M[1][1] *= s; + return *this; + } + + Matrix2 operator/(T s) const { + return *this * (T(1) / s); + } + + Matrix2& operator/=(T s) { + return *this *= (T(1) / s); + } + + Vector2 operator*(const Vector2& b) const { + return Vector2(M[0][0] * b.x + M[0][1] * b.y, M[1][0] * b.x + M[1][1] * b.y); + } + + Vector2 Transform(const Vector2& v) 
const { + return Vector2(M[0][0] * v.x + M[0][1] * v.y, M[1][0] * v.x + M[1][1] * v.y); + } + + Matrix2 Transposed() const { + return Matrix2(M[0][0], M[1][0], M[0][1], M[1][1]); + } + + void Transpose() { + OVRMath_Swap(M[1][0], M[0][1]); + } + + Vector2 GetColumn(int c) const { + return Vector2(M[0][c], M[1][c]); + } + + Vector2 GetRow(int r) const { + return Vector2(M[r][0], M[r][1]); + } + + void SetColumn(int c, const Vector2& v) { + M[0][c] = v.x; + M[1][c] = v.y; + } + + void SetRow(int r, const Vector2& v) { + M[r][0] = v.x; + M[r][1] = v.y; + } + + T Determinant() const { + return M[0][0] * M[1][1] - M[0][1] * M[1][0]; + } + + Matrix2 Inverse() const { + T rcpDet = T(1) / Determinant(); + return Matrix2(M[1][1] * rcpDet, -M[0][1] * rcpDet, -M[1][0] * rcpDet, M[0][0] * rcpDet); + } + + // Outer Product of two column vectors: a * b.Transpose() + static Matrix2 OuterProduct(const Vector2& a, const Vector2& b) { + return Matrix2(a.x * b.x, a.x * b.y, a.y * b.x, a.y * b.y); + } + + // Angle in radians between two rotation matrices + T Angle(const Matrix2& b) const { + const Matrix2& a = *this; + return Acos(a(0, 0) * b(0, 0) + a(1, 0) * b(1, 0)); + } +}; + +typedef Matrix2 Matrix2f; +typedef Matrix2 Matrix2d; + +//------------------------------------------------------------------------------------- + +template +class SymMat3 { + private: + typedef SymMat3 this_type; + + public: + typedef T Value_t; + // Upper symmetric + T v[6]; // _00 _01 _02 _11 _12 _22 + + inline SymMat3() {} + + inline explicit SymMat3(T s) { + v[0] = v[3] = v[5] = s; + v[1] = v[2] = v[4] = T(0); + } + + inline explicit SymMat3(T a00, T a01, T a02, T a11, T a12, T a22) { + v[0] = a00; + v[1] = a01; + v[2] = a02; + v[3] = a11; + v[4] = a12; + v[5] = a22; + } + + // Cast to symmetric Matrix3 + operator Matrix3() const { + return Matrix3(v[0], v[1], v[2], v[1], v[3], v[4], v[2], v[4], v[5]); + } + + static inline int Index(unsigned int i, unsigned int j) { + return (i <= j) ? (3 * i - i * (i + 1) / 2 + j) : (3 * j - j * (j + 1) / 2 + i); + } + + inline T operator()(int i, int j) const { + return v[Index(i, j)]; + } + + inline T& operator()(int i, int j) { + return v[Index(i, j)]; + } + + inline this_type& operator+=(const this_type& b) { + v[0] += b.v[0]; + v[1] += b.v[1]; + v[2] += b.v[2]; + v[3] += b.v[3]; + v[4] += b.v[4]; + v[5] += b.v[5]; + return *this; + } + + inline this_type& operator-=(const this_type& b) { + v[0] -= b.v[0]; + v[1] -= b.v[1]; + v[2] -= b.v[2]; + v[3] -= b.v[3]; + v[4] -= b.v[4]; + v[5] -= b.v[5]; + + return *this; + } + + inline this_type& operator*=(T s) { + v[0] *= s; + v[1] *= s; + v[2] *= s; + v[3] *= s; + v[4] *= s; + v[5] *= s; + + return *this; + } + + inline SymMat3 operator*(T s) const { + SymMat3 d; + d.v[0] = v[0] * s; + d.v[1] = v[1] * s; + d.v[2] = v[2] * s; + d.v[3] = v[3] * s; + d.v[4] = v[4] * s; + d.v[5] = v[5] * s; + + return d; + } + + // Multiplies two matrices into destination with minimum copying. 
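SymMat3 above stores only the upper triangle in the flat array v[6], ordered _00 _01 _02 _11 _12 _22, and Index(i, j) maps either (i, j) or (j, i) onto the same slot. A tiny standalone sketch of that mapping:

#include <cstdio>

// Same flat-index computation as SymMat3::Index above:
// upper-triangular storage ordered _00 _01 _02 _11 _12 _22.
static int SymIndex(unsigned i, unsigned j) {
  return (i <= j) ? (3 * i - i * (i + 1) / 2 + j)
                  : (3 * j - j * (j + 1) / 2 + i);
}

int main() {
  // Prints the 3x3 pattern of flat indices; (i, j) and (j, i) share a slot.
  // Expected:
  //   0 1 2
  //   1 3 4
  //   2 4 5
  for (unsigned i = 0; i < 3; i++) {
    for (unsigned j = 0; j < 3; j++)
      std::printf("%d ", SymIndex(i, j));
    std::printf("\n");
  }
  return 0;
}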
+ static SymMat3& Multiply(SymMat3* d, const SymMat3& a, const SymMat3& b) { + // _00 _01 _02 _11 _12 _22 + + d->v[0] = a.v[0] * b.v[0]; + d->v[1] = a.v[0] * b.v[1] + a.v[1] * b.v[3]; + d->v[2] = a.v[0] * b.v[2] + a.v[1] * b.v[4]; + + d->v[3] = a.v[3] * b.v[3]; + d->v[4] = a.v[3] * b.v[4] + a.v[4] * b.v[5]; + + d->v[5] = a.v[5] * b.v[5]; + + return *d; + } + + inline T Determinant() const { + const this_type& m = *this; + T d; + + d = m(0, 0) * (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1)); + d -= m(0, 1) * (m(1, 0) * m(2, 2) - m(1, 2) * m(2, 0)); + d += m(0, 2) * (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0)); + + return d; + } + + inline this_type Inverse() const { + this_type a; + const this_type& m = *this; + T d = Determinant(); + + OVR_MATH_ASSERT(d != 0); + T s = T(1) / d; + + a(0, 0) = s * (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1)); + + a(0, 1) = s * (m(0, 2) * m(2, 1) - m(0, 1) * m(2, 2)); + a(1, 1) = s * (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0)); + + a(0, 2) = s * (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1)); + a(1, 2) = s * (m(0, 2) * m(1, 0) - m(0, 0) * m(1, 2)); + a(2, 2) = s * (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0)); + + return a; + } + + inline T Trace() const { + return v[0] + v[3] + v[5]; + } + + // M = a*a.t() + inline void Rank1(const Vector3& a) { + v[0] = a.x * a.x; + v[1] = a.x * a.y; + v[2] = a.x * a.z; + v[3] = a.y * a.y; + v[4] = a.y * a.z; + v[5] = a.z * a.z; + } + + // M += a*a.t() + inline void Rank1Add(const Vector3& a) { + v[0] += a.x * a.x; + v[1] += a.x * a.y; + v[2] += a.x * a.z; + v[3] += a.y * a.y; + v[4] += a.y * a.z; + v[5] += a.z * a.z; + } + + // M -= a*a.t() + inline void Rank1Sub(const Vector3& a) { + v[0] -= a.x * a.x; + v[1] -= a.x * a.y; + v[2] -= a.x * a.z; + v[3] -= a.y * a.y; + v[4] -= a.y * a.z; + v[5] -= a.z * a.z; + } +}; + +typedef SymMat3 SymMat3f; +typedef SymMat3 SymMat3d; + +template +inline Matrix3 operator*(const SymMat3& a, const SymMat3& b) { +#define AJB_ARBC(r, c) (a(r, 0) * b(0, c) + a(r, 1) * b(1, c) + a(r, 2) * b(2, c)) + return Matrix3( + AJB_ARBC(0, 0), + AJB_ARBC(0, 1), + AJB_ARBC(0, 2), + AJB_ARBC(1, 0), + AJB_ARBC(1, 1), + AJB_ARBC(1, 2), + AJB_ARBC(2, 0), + AJB_ARBC(2, 1), + AJB_ARBC(2, 2)); +#undef AJB_ARBC +} + +template +inline Matrix3 operator*(const Matrix3& a, const SymMat3& b) { +#define AJB_ARBC(r, c) (a(r, 0) * b(0, c) + a(r, 1) * b(1, c) + a(r, 2) * b(2, c)) + return Matrix3( + AJB_ARBC(0, 0), + AJB_ARBC(0, 1), + AJB_ARBC(0, 2), + AJB_ARBC(1, 0), + AJB_ARBC(1, 1), + AJB_ARBC(1, 2), + AJB_ARBC(2, 0), + AJB_ARBC(2, 1), + AJB_ARBC(2, 2)); +#undef AJB_ARBC +} + +//------------------------------------------------------------------------------------- +// ***** Angle + +// Cleanly representing the algebra of 2D rotations. +// The operations maintain the angle between -Pi and Pi, the same range as atan2. + +template +class Angle { + public: + enum AngularUnits { Radians = 0, Degrees = 1 }; + + Angle() : a(0) {} + + // Fix the range to be between -Pi and Pi + Angle(T a_, AngularUnits u = Radians) + : a((u == Radians) ? a_ : a_ * ((T)MATH_DOUBLE_DEGREETORADFACTOR)) { + FixRange(); + } + + T Get(AngularUnits u = Radians) const { + return (u == Radians) ? a : a * ((T)MATH_DOUBLE_RADTODEGREEFACTOR); + } + void Set(const T& x, AngularUnits u = Radians) { + a = (u == Radians) ? x : x * ((T)MATH_DOUBLE_DEGREETORADFACTOR); + FixRange(); + } + int Sign() const { + if (a == 0) + return 0; + else + return (a > 0) ? 1 : -1; + } + T Abs() const { + return (a >= 0) ? 
a : -a; + } + + bool operator==(const Angle& b) const { + return a == b.a; + } + bool operator!=(const Angle& b) const { + return a != b.a; + } + // bool operator< (const Angle& b) const { return a < a.b; } + // bool operator> (const Angle& b) const { return a > a.b; } + // bool operator<= (const Angle& b) const { return a <= a.b; } + // bool operator>= (const Angle& b) const { return a >= a.b; } + // bool operator= (const T& x) { a = x; FixRange(); } + + // These operations assume a is already between -Pi and Pi. + Angle& operator+=(const Angle& b) { + a = a + b.a; + FastFixRange(); + return *this; + } + Angle& operator+=(const T& x) { + a = a + x; + FixRange(); + return *this; + } + Angle operator+(const Angle& b) const { + Angle res = *this; + res += b; + return res; + } + Angle operator+(const T& x) const { + Angle res = *this; + res += x; + return res; + } + Angle& operator-=(const Angle& b) { + a = a - b.a; + FastFixRange(); + return *this; + } + Angle& operator-=(const T& x) { + a = a - x; + FixRange(); + return *this; + } + Angle operator-(const Angle& b) const { + Angle res = *this; + res -= b; + return res; + } + Angle operator-(const T& x) const { + Angle res = *this; + res -= x; + return res; + } + + T Distance(const Angle& b) { + T c = fabs(a - b.a); + return (c <= ((T)MATH_DOUBLE_PI)) ? c : ((T)MATH_DOUBLE_TWOPI) - c; + } + + private: + // The stored angle, which should be maintained between -Pi and Pi + T a; + + // Fixes the angle range to [-Pi,Pi], but assumes no more than 2Pi away on either side + inline void FastFixRange() { + if (a < -((T)MATH_DOUBLE_PI)) + a += ((T)MATH_DOUBLE_TWOPI); + else if (a > ((T)MATH_DOUBLE_PI)) + a -= ((T)MATH_DOUBLE_TWOPI); + } + + // Fixes the angle range to [-Pi,Pi] for any given range, but slower then the fast method + inline void FixRange() { + // do nothing if the value is already in the correct range, since fmod call is expensive + if (a >= -((T)MATH_DOUBLE_PI) && a <= ((T)MATH_DOUBLE_PI)) + return; + a = fmod(a, ((T)MATH_DOUBLE_TWOPI)); + if (a < -((T)MATH_DOUBLE_PI)) + a += ((T)MATH_DOUBLE_TWOPI); + else if (a > ((T)MATH_DOUBLE_PI)) + a -= ((T)MATH_DOUBLE_TWOPI); + } +}; + +typedef Angle Anglef; +typedef Angle Angled; + +//------------------------------------------------------------------------------------- +// ***** Plane + +// Consists of a normal vector and distance from the origin where the plane is located. + +template +class Plane { + public: + Vector3 N; + T D; + + Plane() : D(0) {} + + // Normals must already be normalized + Plane(const Vector3& n, T d) : N(n), D(d) {} + Plane(T x, T y, T z, T d) : N(x, y, z), D(d) {} + + // construct from a point on the plane and the normal + Plane(const Vector3& p, const Vector3& n) : N(n), D(-(p.Dot(n))) {} + + // Find the point to plane distance. The sign indicates what side of the plane the point is on (0 + // = point on plane). 
+ T TestSide(const Vector3& p) const { + return (N.Dot(p)) + D; + } + + Plane Flipped() const { + return Plane(-N, -D); + } + + void Flip() { + N = -N; + D = -D; + } + + bool operator==(const Plane& rhs) const { + return (this->D == rhs.D && this->N == rhs.N); + } +}; + +typedef Plane Planef; +typedef Plane Planed; + +//----------------------------------------------------------------------------------- +// ***** ScaleAndOffset2D + +struct ScaleAndOffset2D { + Vector2f Scale; + Vector2f Offset; + + ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f) + : Scale(sx, sy), Offset(ox, oy) {} +}; + +//----------------------------------------------------------------------------------- +// ***** FovPort + +// FovPort describes Field Of View (FOV) of a viewport. +// This class has values for up, down, left and right, stored in +// tangent of the angle units to simplify calculations. +// +// As an example, for a standard 90 degree vertical FOV, we would +// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }. +// +// CreateFromRadians/Degrees helper functions can be used to +// access FOV in different units. + +// ***** FovPort + +struct FovPort { + float UpTan; + float DownTan; + float LeftTan; + float RightTan; + + FovPort(float sideTan = 0.0f) + : UpTan(sideTan), DownTan(sideTan), LeftTan(sideTan), RightTan(sideTan) {} + FovPort(float u, float d, float l, float r) : UpTan(u), DownTan(d), LeftTan(l), RightTan(r) {} + +#ifndef OVR_EXCLUDE_CAPI_FROM_MATH + // C-interop support. + typedef CompatibleTypes::Type CompatibleType; + + FovPort(const CompatibleType& s) + : UpTan(s.UpTan), DownTan(s.DownTan), LeftTan(s.LeftTan), RightTan(s.RightTan) {} + + operator const CompatibleType&() const { + OVR_MATH_STATIC_ASSERT(sizeof(FovPort) == sizeof(CompatibleType), "sizeof(FovPort) failure"); + return reinterpret_cast(*this); + } +#endif + + static FovPort CreateFromRadians(float horizontalFov, float verticalFov) { + FovPort result; + result.UpTan = tanf(verticalFov * 0.5f); + result.DownTan = tanf(verticalFov * 0.5f); + result.LeftTan = tanf(horizontalFov * 0.5f); + result.RightTan = tanf(horizontalFov * 0.5f); + return result; + } + + static FovPort CreateFromDegrees(float horizontalFovDegrees, float verticalFovDegrees) { + return CreateFromRadians(DegreeToRad(horizontalFovDegrees), DegreeToRad(verticalFovDegrees)); + } + + // Get Horizontal/Vertical components of Fov in radians. + float GetVerticalFovRadians() const { + return atanf(UpTan) + atanf(DownTan); + } + float GetHorizontalFovRadians() const { + return atanf(LeftTan) + atanf(RightTan); + } + // Get Horizontal/Vertical components of Fov in degrees. + float GetVerticalFovDegrees() const { + return RadToDegree(GetVerticalFovRadians()); + } + float GetHorizontalFovDegrees() const { + return RadToDegree(GetHorizontalFovRadians()); + } + + // Compute maximum tangent value among all four sides. 
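FovPort stores each edge as the tangent of its half-angle, so a symmetric 90-degree vertical FOV is simply UpTan = DownTan = tan(45°) = 1. A small sketch of the round trip through the helpers declared here (FovPortDemo is an illustrative name, not SDK code):

#include <cstdio>
#include "Extras/OVR_Math.h"

static void FovPortDemo() {
  using namespace OVR;
  // Symmetric 100-degree horizontal by 90-degree vertical field of view.
  FovPort fov = FovPort::CreateFromDegrees(100.0f, 90.0f);

  // Each member is tan(half-angle): UpTan == DownTan == tan(45 deg) == 1.
  std::printf("UpTan=%g LeftTan=%g\n", fov.UpTan, fov.LeftTan);

  // The getters recombine the per-edge angles; expect roughly 100 and 90 back.
  std::printf("H=%g deg, V=%g deg\n",
              fov.GetHorizontalFovDegrees(), fov.GetVerticalFovDegrees());
}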
+ float GetMaxSideTan() const { + return OVRMath_Max(OVRMath_Max(UpTan, DownTan), OVRMath_Max(LeftTan, RightTan)); + } + + static ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort tanHalfFov) { + float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan); + float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f; + float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan); + float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f; + + ScaleAndOffset2D result; + result.Scale = Vector2f(projXScale, projYScale); + result.Offset = Vector2f(projXOffset, projYOffset); + // Hey - why is that Y.Offset negated? + // It's because a projection matrix transforms from world coords with Y=up, + // whereas this is from NDC which is Y=down. + + return result; + } + + // Converts Fov Tan angle units to [-1,1] render target NDC space + Vector2f TanAngleToRendertargetNDC(Vector2f const& tanEyeAngle) { + ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(*this); + return tanEyeAngle * eyeToSourceNDC.Scale + eyeToSourceNDC.Offset; + } + + // Compute per-channel minimum and maximum of Fov. + static FovPort Min(const FovPort& a, const FovPort& b) { + FovPort fov( + OVRMath_Min(a.UpTan, b.UpTan), + OVRMath_Min(a.DownTan, b.DownTan), + OVRMath_Min(a.LeftTan, b.LeftTan), + OVRMath_Min(a.RightTan, b.RightTan)); + return fov; + } + + static FovPort Max(const FovPort& a, const FovPort& b) { + FovPort fov( + OVRMath_Max(a.UpTan, b.UpTan), + OVRMath_Max(a.DownTan, b.DownTan), + OVRMath_Max(a.LeftTan, b.LeftTan), + OVRMath_Max(a.RightTan, b.RightTan)); + return fov; + } + + static FovPort Uncant(const FovPort& cantedFov, Quatf canting) { + FovPort uncantedFov = cantedFov; + + // make 3D vectors from the FovPorts projected to z=1 plane + Vector3f leftUp = Vector3f(cantedFov.LeftTan, cantedFov.UpTan, 1.0f); + Vector3f rightUp = Vector3f(-cantedFov.RightTan, cantedFov.UpTan, 1.0f); + Vector3f leftDown = Vector3f(cantedFov.LeftTan, -cantedFov.DownTan, 1.0f); + Vector3f rightDown = Vector3f(-cantedFov.RightTan, -cantedFov.DownTan, 1.0f); + + // rotate these vectors using the canting specified + leftUp = canting.Rotate(leftUp); + rightUp = canting.Rotate(rightUp); + leftDown = canting.Rotate(leftDown); + rightDown = canting.Rotate(rightDown); + + // If the z coordinates of any of the corners end up being really small or negative, then + // projection will generate extremely large or inverted frustums and we don't really want that + const float kMinValidZ = 0.01f; + + // re-project back to z=1 plane while making sure we don't generate gigantic values (hence max) + leftUp /= OVRMath_Max(leftUp.z, kMinValidZ); + rightUp /= OVRMath_Max(rightUp.z, kMinValidZ); + leftDown /= OVRMath_Max(leftDown.z, kMinValidZ); + rightDown /= OVRMath_Max(rightDown.z, kMinValidZ); + + // generate new FovTans as "bounding box" values + uncantedFov.UpTan = OVRMath_Max(leftUp.y, rightUp.y); + uncantedFov.DownTan = OVRMath_Max(-leftDown.y, -rightDown.y); + uncantedFov.LeftTan = OVRMath_Max(leftUp.x, leftDown.x); + uncantedFov.RightTan = OVRMath_Max(-rightDown.x, -rightUp.x); + + return uncantedFov; + } + + template + static FovPort ScaleFovPort(const FovPort& fov, OVR::Vector2 scaleFactors) { + FovPort retFov = FovPort(fov); + retFov.LeftTan *= ((scaleFactors.x != 0.0) ? scaleFactors.x : 1.0f); + retFov.RightTan *= ((scaleFactors.x != 0.0) ? scaleFactors.x : 1.0f); + retFov.UpTan *= ((scaleFactors.y != 0.0) ? 
scaleFactors.y : 1.0f); + retFov.DownTan *= ((scaleFactors.y != 0.0) ? scaleFactors.y : 1.0f); + return retFov; + } +}; + +} // Namespace OVR + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#endif diff --git a/Include/Extras/OVR_StereoProjection.h b/Include/Extras/OVR_StereoProjection.h new file mode 100755 index 0000000..e3fe4f1 --- /dev/null +++ b/Include/Extras/OVR_StereoProjection.h @@ -0,0 +1,73 @@ +/************************************************************************************ + +Filename : OVR_StereoProjection.h +Content : Stereo projection functions +Created : November 30, 2013 +Authors : Tom Fosyth + +Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.3 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_StereoProjection_h +#define OVR_StereoProjection_h + +#include "Extras/OVR_Math.h" + +namespace OVR { + +//----------------------------------------------------------------------------------- +// ***** Stereo Enumerations + +// StereoEye specifies which eye we are rendering for; it is used to +// retrieve StereoEyeParams. +enum StereoEye { StereoEye_Left, StereoEye_Right, StereoEye_Center }; + +//----------------------------------------------------------------------------------- +// ***** Propjection functions + +Matrix4f CreateProjection( + bool rightHanded, + bool isOpenGL, + FovPort fov, + StereoEye eye, + float zNear = 0.01f, + float zFar = 10000.0f, + bool flipZ = false, + bool farAtInfinity = false); + +Matrix4f CreateOrthoSubProjection( + bool rightHanded, + StereoEye eyeType, + float tanHalfFovX, + float tanHalfFovY, + float unitsX, + float unitsY, + float distanceFromCamera, + float interpupillaryDistance, + Matrix4f const& projection, + float zNear = 0.0f, + float zFar = 0.0f, + bool flipZ = false, + bool farAtInfinity = false); + +ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort fov); + +} // namespace OVR + +#endif // OVR_StereoProjection_h diff --git a/Include/OVR_CAPI.h b/Include/OVR_CAPI.h new file mode 100755 index 0000000..46b27bc --- /dev/null +++ b/Include/OVR_CAPI.h @@ -0,0 +1,3394 @@ +/************************************************************************************ + \file OVR_CAPI.h + \brief C Interface to the Oculus PC SDK tracking and rendering library. + \copyright Copyright 2014 Oculus VR, LLC All Rights reserved. + ************************************************************************************/ + +// We don't use version numbers within OVR_CAPI_h, as all versioned variations +// of this file are currently mutually exclusive. 
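Before the C API proper begins below, a short sketch tying the FovPort type from OVR_Math.h to the stereo projection helpers declared in OVR_StereoProjection.h above. Only the declarations are part of this patch; the sketch assumes the matching implementation (OVR_StereoProjection.cpp from the SDK) is linked in, and LeftEyeProjection is an illustrative helper name:

#include "Extras/OVR_Math.h"
#include "Extras/OVR_StereoProjection.h"

static OVR::Matrix4f LeftEyeProjection() {
  using namespace OVR;
  FovPort fov = FovPort::CreateFromDegrees(100.0f, 90.0f);

  // Right-handed, D3D-style clip range (isOpenGL = false), explicit near/far.
  Matrix4f proj = CreateProjection(
      true /*rightHanded*/, false /*isOpenGL*/, fov, StereoEye_Left,
      0.01f /*zNear*/, 10000.0f /*zFar*/);

  // The same FOV also yields the scale/offset that maps tan-angle space to NDC.
  ScaleAndOffset2D ndc = CreateNDCScaleAndOffsetFromFov(fov);
  (void)ndc;
  return proj;
}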
+#ifndef OVR_CAPI_h +#define OVR_CAPI_h + +#include "OVR_CAPI_Keys.h" +#include "OVR_Version.h" +#include "OVR_ErrorCode.h" + +#if !defined(_WIN32) +#include +#endif + + +#include + +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4324) // structure was padded due to __declspec(align()) +#pragma warning(disable : 4359) // The alignment specified for a type is less than the +// alignment of the type of one of its data members +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_OS +// +#if !defined(OVR_OS_WIN32) && defined(_WIN32) +#define OVR_OS_WIN32 +#endif + +#if !defined(OVR_OS_MAC) && defined(__APPLE__) +#define OVR_OS_MAC +#endif + +#if !defined(OVR_OS_LINUX) && defined(__linux__) +#define OVR_OS_LINUX +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_CPP +// +#if !defined(OVR_CPP) +#if defined(__cplusplus) +#define OVR_CPP(x) x +#else +#define OVR_CPP(x) /* Not C++ */ +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_CDECL +// +/// LibOVR calling convention for 32-bit Windows builds. +// +#if !defined(OVR_CDECL) +#if defined(_WIN32) +#define OVR_CDECL __cdecl +#else +#define OVR_CDECL +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_EXTERN_C +// +/// Defined as extern "C" when built from C++ code. +// +#if !defined(OVR_EXTERN_C) +#ifdef __cplusplus +#define OVR_EXTERN_C extern "C" +#else +#define OVR_EXTERN_C +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_PUBLIC_FUNCTION / OVR_PRIVATE_FUNCTION +// +// OVR_PUBLIC_FUNCTION - Functions that externally visible from a shared library. +// Corresponds to Microsoft __dllexport. +// OVR_PUBLIC_CLASS - C++ structs and classes that are externally visible from a +// shared library. Corresponds to Microsoft __dllexport. +// OVR_PRIVATE_FUNCTION - Functions that are not visible outside of a shared library. +// They are private to the shared library. +// OVR_PRIVATE_CLASS - C++ structs and classes that are not visible outside of a +// shared library. They are private to the shared library. +// +// OVR_DLL_BUILD - Used to indicate that the current compilation unit is of a shared library. +// OVR_DLL_IMPORT - Used to indicate that the current compilation unit is a +// user of the corresponding shared library. +// OVR_STATIC_BUILD - used to indicate that the current compilation unit is not a +// shared library but rather statically linked code. 
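The export/import scheme described above is driven entirely by which of OVR_DLL_BUILD, OVR_DLL_IMPORT, or neither is defined when this header is compiled. A consumer-side sketch of the resulting expansion (ovr_ExampleCall is a hypothetical entry point used only to show the expansion, not a real LibOVR function):

/* Client of libovr.dll: request the dllimport variant of OVR_PUBLIC_FUNCTION
 * by defining OVR_DLL_IMPORT before including the header. */
#define OVR_DLL_IMPORT
#include "OVR_CAPI.h"

/* A declaration written as
 *     OVR_PUBLIC_FUNCTION(ovrResult) ovr_ExampleCall(int value);
 * then expands on Windows to
 *     OVR_EXTERN_C __declspec(dllimport) ovrResult __cdecl ovr_ExampleCall(int value);
 * under OVR_DLL_BUILD the attribute becomes dllexport, and a static build
 * (neither macro defined) drops the __declspec attribute entirely. */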
+// +#if !defined(OVR_PUBLIC_FUNCTION) +#if defined(OVR_DLL_BUILD) +#if defined(_WIN32) +#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllexport) rval OVR_CDECL +#define OVR_PUBLIC_CLASS __declspec(dllexport) +#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL +#define OVR_PRIVATE_CLASS +#else +#define OVR_PUBLIC_FUNCTION(rval) \ + OVR_EXTERN_C __attribute__((visibility("default"))) rval OVR_CDECL /* Requires GCC 4.0+ */ +#define OVR_PUBLIC_CLASS __attribute__((visibility("default"))) /* Requires GCC 4.0+ */ +#define OVR_PRIVATE_FUNCTION(rval) __attribute__((visibility("hidden"))) rval OVR_CDECL +#define OVR_PRIVATE_CLASS __attribute__((visibility("hidden"))) +#endif +#elif defined(OVR_DLL_IMPORT) +#if defined(_WIN32) +#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllimport) rval OVR_CDECL +#define OVR_PUBLIC_CLASS __declspec(dllimport) +#else +#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL +#define OVR_PUBLIC_CLASS +#endif +#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL +#define OVR_PRIVATE_CLASS +#else // OVR_STATIC_BUILD +#define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL +#define OVR_PUBLIC_CLASS +#define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL +#define OVR_PRIVATE_CLASS +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_EXPORT +// +/// Provided for backward compatibility with older versions of this library. +// +#if !defined(OVR_EXPORT) +#ifdef OVR_OS_WIN32 +#define OVR_EXPORT __declspec(dllexport) +#else +#define OVR_EXPORT +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_ALIGNAS +// +#if !defined(OVR_ALIGNAS) +#if defined(__GNUC__) || defined(__clang__) +#define OVR_ALIGNAS(n) __attribute__((aligned(n))) +#elif defined(_MSC_VER) || defined(__INTEL_COMPILER) +#define OVR_ALIGNAS(n) __declspec(align(n)) +#elif defined(__CC_ARM) +#define OVR_ALIGNAS(n) __align(n) +#else +#error Need to define OVR_ALIGNAS +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_CC_HAS_FEATURE +// +// This is a portable way to use compile-time feature identification available +// with some compilers in a clean way. Direct usage of __has_feature in preprocessing +// statements of non-supporting compilers results in a preprocessing error. +// +// Example usage: +// #if OVR_CC_HAS_FEATURE(is_pod) +// if(__is_pod(T)) // If the type is plain data then we can safely memcpy it. +// memcpy(&destObject, &srcObject, sizeof(object)); +// #endif +// +#if !defined(OVR_CC_HAS_FEATURE) +#if defined(__clang__) // http://clang.llvm.org/docs/LanguageExtensions.html#id2 +#define OVR_CC_HAS_FEATURE(x) __has_feature(x) +#else +#define OVR_CC_HAS_FEATURE(x) 0 +#endif +#endif + +// ------------------------------------------------------------------------ +// ***** OVR_STATIC_ASSERT +// +// Portable support for C++11 static_assert(). 
+// Acts as if the following were declared: +// void OVR_STATIC_ASSERT(bool const_expression, const char* msg); +// +// Example usage: +// OVR_STATIC_ASSERT(sizeof(int32_t) == 4, "int32_t expected to be 4 bytes."); + +#if !defined(OVR_STATIC_ASSERT) +#if !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* Other */ && \ + !(defined(__GXX_EXPERIMENTAL_CXX0X__)) /* GCC */ && \ + !(defined(__clang__) && defined(__cplusplus) && \ + OVR_CC_HAS_FEATURE(cxx_static_assert)) /* clang */ \ + && !(defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)) /* VS2010+ */ + +#if !defined(OVR_SA_UNUSED) +#if defined(OVR_CC_GNU) || defined(OVR_CC_CLANG) +#define OVR_SA_UNUSED __attribute__((unused)) +#else +#define OVR_SA_UNUSED +#endif +#define OVR_SA_PASTE(a, b) a##b +#define OVR_SA_HELP(a, b) OVR_SA_PASTE(a, b) +#endif + +#if defined(__COUNTER__) +#define OVR_STATIC_ASSERT(expression, msg) \ + typedef char OVR_SA_HELP(staticAssert, __COUNTER__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED +#else +#define OVR_STATIC_ASSERT(expression, msg) \ + typedef char OVR_SA_HELP(staticAssert, __LINE__)[((expression) != 0) ? 1 : -1] OVR_SA_UNUSED +#endif + +#else +#define OVR_STATIC_ASSERT(expression, msg) static_assert(expression, msg) +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** Padding +// +/// Defines explicitly unused space for a struct. +/// When used correcly, usage of this macro should not change the size of the struct. +/// Compile-time and runtime behavior with and without this defined should be identical. +/// +#if !defined(OVR_UNUSED_STRUCT_PAD) +#define OVR_UNUSED_STRUCT_PAD(padName, size) char padName[size]; +#endif + +//----------------------------------------------------------------------------------- +// ***** Word Size +// +/// Specifies the size of a pointer on the given platform. +/// +#if !defined(OVR_PTR_SIZE) +#if defined(__WORDSIZE) +#define OVR_PTR_SIZE ((__WORDSIZE) / 8) +#elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || \ + defined(__ia64__) || defined(__arch64__) || defined(__64BIT__) || defined(__Ptr_Is_64) +#define OVR_PTR_SIZE 8 +#elif defined(__CC_ARM) && (__sizeof_ptr == 8) +#define OVR_PTR_SIZE 8 +#else +#define OVR_PTR_SIZE 4 +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** OVR_ON32 / OVR_ON64 +// +#if OVR_PTR_SIZE == 8 +#define OVR_ON32(x) +#define OVR_ON64(x) x +#else +#define OVR_ON32(x) x +#define OVR_ON64(x) +#endif + +//----------------------------------------------------------------------------------- +// ***** ovrBool + +typedef char ovrBool; ///< Boolean type +#define ovrFalse 0 ///< ovrBool value of false. +#define ovrTrue 1 ///< ovrBool value of true. + +//----------------------------------------------------------------------------------- +// ***** Simple Math Structures + +/// A RGBA color with normalized float components. +typedef struct OVR_ALIGNAS(4) ovrColorf_ { + float r, g, b, a; +} ovrColorf; + +/// A 2D vector with integer components. +typedef struct OVR_ALIGNAS(4) ovrVector2i_ { + int x, y; +} ovrVector2i; + +/// A 2D size with integer components. +typedef struct OVR_ALIGNAS(4) ovrSizei_ { + int w, h; +} ovrSizei; + +/// A 2D rectangle with a position and size. +/// All components are integers. +typedef struct OVR_ALIGNAS(4) ovrRecti_ { + ovrVector2i Pos; + ovrSizei Size; +} ovrRecti; + +/// A quaternion rotation. 
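The padding and size-assertion macros above are meant to be used together so that a struct's layout is identical across compilers and bitnesses. A sketch of the pattern (ovrExampleBlob is a hypothetical struct, not part of the SDK):

/* ovrExampleBlob is hypothetical; it only demonstrates the macros. */
typedef struct OVR_ALIGNAS(8) ovrExampleBlob_ {
  ovrBool Enabled;                /* 1 byte */
  OVR_UNUSED_STRUCT_PAD(pad0, 7)  /* make the 7 bytes of padding explicit */
  double TimeInSeconds;           /* 8 bytes at offset 8 */
} ovrExampleBlob;

/* Fails the build if a compiler ever lays this out differently. */
OVR_STATIC_ASSERT(sizeof(ovrExampleBlob) == 16, "ovrExampleBlob size mismatch");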
+typedef struct OVR_ALIGNAS(4) ovrQuatf_ { + float x, y, z, w; +} ovrQuatf; + +/// A 2D vector with float components. +typedef struct OVR_ALIGNAS(4) ovrVector2f_ { + float x, y; +} ovrVector2f; + +/// A 3D vector with float components. +typedef struct OVR_ALIGNAS(4) ovrVector3f_ { + float x, y, z; +} ovrVector3f; + +/// A 4x4 matrix with float elements. +typedef struct OVR_ALIGNAS(4) ovrMatrix4f_ { + float M[4][4]; +} ovrMatrix4f; + +/// Position and orientation together. +/// The coordinate system used is right-handed Cartesian. +typedef struct OVR_ALIGNAS(4) ovrPosef_ { + ovrQuatf Orientation; + ovrVector3f Position; +} ovrPosef; + +/// A full pose (rigid body) configuration with first and second derivatives. +/// +/// Body refers to any object for which ovrPoseStatef is providing data. +/// It can be the HMD, Touch controller, sensor or something else. The context +/// depends on the usage of the struct. +typedef struct OVR_ALIGNAS(8) ovrPoseStatef_ { + ovrPosef ThePose; ///< Position and orientation. + ovrVector3f AngularVelocity; ///< Angular velocity in radians per second. + ovrVector3f LinearVelocity; ///< Velocity in meters per second. + ovrVector3f AngularAcceleration; ///< Angular acceleration in radians per second per second. + ovrVector3f LinearAcceleration; ///< Acceleration in meters per second per second. + OVR_UNUSED_STRUCT_PAD(pad0, 4) ///< \internal struct pad. + double TimeInSeconds; ///< Absolute time that this pose refers to. \see ovr_GetTimeInSeconds +} ovrPoseStatef; + +/// Describes the up, down, left, and right angles of the field of view. +/// +/// Field Of View (FOV) tangent of the angle units. +/// \note For a standard 90 degree vertical FOV, we would +/// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }. +typedef struct OVR_ALIGNAS(4) ovrFovPort_ { + float UpTan; ///< Tangent of the angle between the viewing vector and top edge of the FOV. + float DownTan; ///< Tangent of the angle between the viewing vector and bottom edge of the FOV. + float LeftTan; ///< Tangent of the angle between the viewing vector and left edge of the FOV. + float RightTan; ///< Tangent of the angle between the viewing vector and right edge of the FOV. +} ovrFovPort; + +//----------------------------------------------------------------------------------- +// ***** HMD Types + +/// Enumerates all HMD types that we support. +/// +/// The currently released developer kits are ovrHmd_DK1 and ovrHmd_DK2. +/// The other enumerations are for internal use only. +typedef enum ovrHmdType_ { + ovrHmd_None = 0, + ovrHmd_DK1 = 3, + ovrHmd_DKHD = 4, + ovrHmd_DK2 = 6, + ovrHmd_CB = 8, + ovrHmd_Other = 9, + ovrHmd_E3_2015 = 10, + ovrHmd_ES06 = 11, + ovrHmd_ES09 = 12, + ovrHmd_ES11 = 13, + ovrHmd_CV1 = 14, + + ovrHmd_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrHmdType; + +/// HMD capability bits reported by device. +/// +typedef enum ovrHmdCaps_ { + // Read-only flags + + /// (read only) Specifies that the HMD is a virtual debug device. + ovrHmdCap_DebugDevice = 0x0010, + + + ovrHmdCap_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrHmdCaps; + +/// Tracking capability bits reported by the device. +/// Used with ovr_GetTrackingCaps. +typedef enum ovrTrackingCaps_ { + ovrTrackingCap_Orientation = 0x0010, ///< Supports orientation tracking (IMU). + ovrTrackingCap_MagYawCorrection = 0x0020, ///< Supports yaw drift correction. + ovrTrackingCap_Position = 0x0040, ///< Supports positional tracking. 
+ ovrTrackingCap_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTrackingCaps; + +/// Optional extensions +typedef enum ovrExtensions_ { + ovrExtension_TextureLayout_Octilinear = 0, ///< Enable before first layer submission. + ovrExtension_Count, ///< \internal Sanity checking + ovrExtension_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrExtensions; + +/// Specifies which eye is being used for rendering. +/// This type explicitly does not include a third "NoStereo" monoscopic option, +/// as such is not required for an HMD-centered API. +typedef enum ovrEyeType_ { + ovrEye_Left = 0, ///< The left eye, from the viewer's perspective. + ovrEye_Right = 1, ///< The right eye, from the viewer's perspective. + ovrEye_Count = 2, ///< \internal Count of enumerated elements. + ovrEye_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrEyeType; + +/// Specifies the coordinate system ovrTrackingState returns tracking poses in. +/// Used with ovr_SetTrackingOriginType() +typedef enum ovrTrackingOrigin_ { + /// \brief Tracking system origin reported at eye (HMD) height + /// \details Prefer using this origin when your application requires + /// matching user's current physical head pose to a virtual head pose + /// without any regards to a the height of the floor. Cockpit-based, + /// or 3rd-person experiences are ideal candidates. + /// When used, all poses in ovrTrackingState are reported as an offset + /// transform from the profile calibrated or recentered HMD pose. + /// It is recommended that apps using this origin type call ovr_RecenterTrackingOrigin + /// prior to starting the VR experience, but notify the user before doing so + /// to make sure the user is in a comfortable pose, facing a comfortable + /// direction. + ovrTrackingOrigin_EyeLevel = 0, + + /// \brief Tracking system origin reported at floor height + /// \details Prefer using this origin when your application requires the + /// physical floor height to match the virtual floor height, such as + /// standing experiences. + /// When used, all poses in ovrTrackingState are reported as an offset + /// transform from the profile calibrated floor pose. Calling ovr_RecenterTrackingOrigin + /// will recenter the X & Z axes as well as yaw, but the Y-axis (i.e. height) will continue + /// to be reported using the floor height as the origin for all poses. + ovrTrackingOrigin_FloorLevel = 1, + + ovrTrackingOrigin_Count = 2, ///< \internal Count of enumerated elements. + ovrTrackingOrigin_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTrackingOrigin; + +/// Identifies a graphics device in a platform-specific way. +/// For Windows this is a LUID type. +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrGraphicsLuid_ { + // Public definition reserves space for graphics API-specific implementation + char Reserved[8]; +} ovrGraphicsLuid; + +/// This is a complete descriptor of the HMD. +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrHmdDesc_ { + ovrHmdType Type; ///< The type of HMD. + OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal struct paddding. + char ProductName[64]; ///< UTF8-encoded product identification string (e.g. "Oculus Rift DK1"). + char Manufacturer[64]; ///< UTF8-encoded HMD manufacturer identification string. + short VendorId; ///< HID (USB) vendor identifier of the device. + short ProductId; ///< HID (USB) product identifier of the device. + char SerialNumber[24]; ///< HMD serial number. + short FirmwareMajor; ///< HMD firmware major version. 
+ short FirmwareMinor; ///< HMD firmware minor version. + unsigned int AvailableHmdCaps; ///< Available ovrHmdCaps bits. + unsigned int DefaultHmdCaps; ///< Default ovrHmdCaps bits. + unsigned int AvailableTrackingCaps; ///< Available ovrTrackingCaps bits. + unsigned int DefaultTrackingCaps; ///< Default ovrTrackingCaps bits. + ovrFovPort DefaultEyeFov[ovrEye_Count]; ///< Defines the recommended FOVs for the HMD. + ovrFovPort MaxEyeFov[ovrEye_Count]; ///< Defines the maximum FOVs for the HMD. + ovrSizei Resolution; ///< Resolution of the full HMD screen (both eyes) in pixels. + float DisplayRefreshRate; ///< Refresh rate of the display in cycles per second. + OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad1, 4)) ///< \internal struct paddding. +} ovrHmdDesc; + +/// Used as an opaque pointer to an OVR session. +typedef struct ovrHmdStruct* ovrSession; + +#ifdef OVR_OS_WIN32 +typedef uint32_t ovrProcessId; +#else +typedef pid_t ovrProcessId; +#endif + +/// Fallback definitions for when the vulkan header isn't being included +#if !defined(VK_VERSION_1_0) +// From : +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || \ + defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || \ + defined(__powerpc64__) +#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T* object; +#else +#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +#endif + +/// Bit flags describing the current status of sensor tracking. +/// The values must be the same as in enum StatusBits +/// +/// \see ovrTrackingState +/// +typedef enum ovrStatusBits_ { + ovrStatus_OrientationTracked = 0x0001, ///< Orientation is currently tracked (connected & in use). + ovrStatus_PositionTracked = 0x0002, ///< Position is currently tracked (false if out of range). + ovrStatus_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrStatusBits; + +/// Specifies the description of a single sensor. +/// +/// \see ovr_GetTrackerDesc +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTrackerDesc_ { + float FrustumHFovInRadians; ///< Sensor frustum horizontal field-of-view (if present). + float FrustumVFovInRadians; ///< Sensor frustum vertical field-of-view (if present). + float FrustumNearZInMeters; ///< Sensor frustum near Z (if present). + float FrustumFarZInMeters; ///< Sensor frustum far Z (if present). +} ovrTrackerDesc; + +/// Specifies sensor flags. +/// +/// /see ovrTrackerPose +/// +typedef enum ovrTrackerFlags_ { + /// The sensor is present, else the sensor is absent or offline. + ovrTracker_Connected = 0x0020, + + /// The sensor has a valid pose, else the pose is unavailable. + /// This will only be set if ovrTracker_Connected is set. + ovrTracker_PoseTracked = 0x0004 +} ovrTrackerFlags; + +/// Specifies the pose for a single sensor. +/// +typedef struct OVR_ALIGNAS(8) _ovrTrackerPose { + /// ovrTrackerFlags. + unsigned int TrackerFlags; + + /// The sensor's pose. This pose includes sensor tilt (roll and pitch). + /// For a leveled coordinate system use LeveledPose. + ovrPosef Pose; + + /// The sensor's leveled pose, aligned with gravity. This value includes pos and yaw of the + /// sensor, but not roll and pitch. 
It can be used as a reference point to render real-world + /// objects in the correct location. + ovrPosef LeveledPose; + + OVR_UNUSED_STRUCT_PAD(pad0, 4) ///< \internal struct pad. +} ovrTrackerPose; + +/// Tracking state at a given absolute time (describes predicted HMD pose, etc.). +/// Returned by ovr_GetTrackingState. +/// +/// \see ovr_GetTrackingState +/// +typedef struct OVR_ALIGNAS(8) ovrTrackingState_ { + /// Predicted head pose (and derivatives) at the requested absolute time. + ovrPoseStatef HeadPose; + + /// HeadPose tracking status described by ovrStatusBits. + unsigned int StatusFlags; + + /// The most recent calculated pose for each hand when hand controller tracking is present. + /// HandPoses[ovrHand_Left] refers to the left hand and HandPoses[ovrHand_Right] to the right. + /// These values can be combined with ovrInputState for complete hand controller information. + ovrPoseStatef HandPoses[2]; + + /// HandPoses status flags described by ovrStatusBits. + /// Only ovrStatus_OrientationTracked and ovrStatus_PositionTracked are reported. + unsigned int HandStatusFlags[2]; + + /// The pose of the origin captured during calibration. + /// Like all other poses here, this is expressed in the space set by ovr_RecenterTrackingOrigin, + /// or ovr_SpecifyTrackingOrigin and so will change every time either of those functions are + /// called. This pose can be used to calculate where the calibrated origin lands in the new + /// recentered space. If an application never calls ovr_RecenterTrackingOrigin or + /// ovr_SpecifyTrackingOrigin, expect this value to be the identity pose and as such will point + /// respective origin based on ovrTrackingOrigin requested when calling ovr_GetTrackingState. + ovrPosef CalibratedOrigin; + +} ovrTrackingState; + + + +/// Rendering information for each eye. Computed by ovr_GetRenderDesc() based on the +/// specified FOV. Note that the rendering viewport is not included +/// here as it can be specified separately and modified per frame by +/// passing different Viewport values in the layer structure. +/// +/// \see ovr_GetRenderDesc +/// +typedef struct OVR_ALIGNAS(4) ovrEyeRenderDesc_ { + ovrEyeType Eye; ///< The eye index to which this instance corresponds. + ovrFovPort Fov; ///< The field of view. + ovrRecti DistortedViewport; ///< Distortion viewport. + ovrVector2f PixelsPerTanAngleAtCenter; ///< How many display pixels will fit in tan(angle) = 1. + ovrPosef HmdToEyePose; ///< Transform of eye from the HMD center, in meters. +} ovrEyeRenderDesc; + +/// Projection information for ovrLayerEyeFovDepth. +/// +/// Use the utility function ovrTimewarpProjectionDesc_FromProjection to +/// generate this structure from the application's projection matrix. +/// +/// \see ovrLayerEyeFovDepth, ovrTimewarpProjectionDesc_FromProjection +/// +typedef struct OVR_ALIGNAS(4) ovrTimewarpProjectionDesc_ { + float Projection22; ///< Projection matrix element [2][2]. + float Projection23; ///< Projection matrix element [2][3]. + float Projection32; ///< Projection matrix element [3][2]. +} ovrTimewarpProjectionDesc; + + +/// Contains the data necessary to properly calculate position info for various layer types. +/// - HmdToEyePose is the same value-pair provided in ovrEyeRenderDesc. Modifying this value is +/// suggested only if the app is forcing monoscopic rendering and requires that all layers +/// including quad layers show up in a monoscopic fashion. +/// - HmdSpaceToWorldScaleInMeters is used to scale player motion into in-application units. 
+/// In other words, it is how big an in-application unit is in the player's physical meters. +/// For example, if the application uses inches as its units then HmdSpaceToWorldScaleInMeters +/// would be 0.0254. +/// Note that if you are scaling the player in size, this must also scale. So if your application +/// units are inches, but you're shrinking the player to half their normal size, then +/// HmdSpaceToWorldScaleInMeters would be 0.0254*2.0. +/// +/// \see ovrEyeRenderDesc, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(4) ovrViewScaleDesc_ { + ovrPosef HmdToEyePose[ovrEye_Count]; ///< Transform of each eye from the HMD center, in meters. + float HmdSpaceToWorldScaleInMeters; ///< Ratio of viewer units to meter units. +} ovrViewScaleDesc; + +//----------------------------------------------------------------------------------- +// ***** Platform-independent Rendering Configuration + +/// The type of texture resource. +/// +/// \see ovrTextureSwapChainDesc +/// +typedef enum ovrTextureType_ { + ovrTexture_2D, ///< 2D textures. + ovrTexture_2D_External, ///< Application-provided 2D texture. Not supported on PC. + ovrTexture_Cube, ///< Cube maps. ovrTextureSwapChainDesc::ArraySize must be 6 for this type. + ovrTexture_Count, + ovrTexture_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTextureType; + +/// The bindings required for texture swap chain. +/// +/// All texture swap chains are automatically bindable as shader +/// input resources since the Oculus runtime needs this to read them. +/// +/// \see ovrTextureSwapChainDesc +/// +typedef enum ovrTextureBindFlags_ { + ovrTextureBind_None, + + /// The application can write into the chain with pixel shader. + ovrTextureBind_DX_RenderTarget = 0x0001, + + /// The application can write to the chain with compute shader. + ovrTextureBind_DX_UnorderedAccess = 0x0002, + + /// The chain buffers can be bound as depth and/or stencil buffers. + /// This flag cannot be combined with ovrTextureBind_DX_RenderTarget. + ovrTextureBind_DX_DepthStencil = 0x0004, + + ovrTextureBind_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTextureBindFlags; + +/// The format of a texture. +/// +/// \see ovrTextureSwapChainDesc +/// +typedef enum ovrTextureFormat_ { + OVR_FORMAT_UNKNOWN = 0, + OVR_FORMAT_B5G6R5_UNORM = 1, ///< Not currently supported on PC. Requires a DirectX 11.1 device. + OVR_FORMAT_B5G5R5A1_UNORM = 2, ///< Not currently supported on PC. Requires a DirectX 11.1 device. + OVR_FORMAT_B4G4R4A4_UNORM = 3, ///< Not currently supported on PC. Requires a DirectX 11.1 device. 
+ OVR_FORMAT_R8G8B8A8_UNORM = 4, + OVR_FORMAT_R8G8B8A8_UNORM_SRGB = 5, + OVR_FORMAT_B8G8R8A8_UNORM = 6, + OVR_FORMAT_B8G8R8_UNORM = 27, + OVR_FORMAT_B8G8R8A8_UNORM_SRGB = 7, ///< Not supported for OpenGL applications + OVR_FORMAT_B8G8R8X8_UNORM = 8, ///< Not supported for OpenGL applications + OVR_FORMAT_B8G8R8X8_UNORM_SRGB = 9, ///< Not supported for OpenGL applications + OVR_FORMAT_R16G16B16A16_FLOAT = 10, + OVR_FORMAT_R11G11B10_FLOAT = 25, ///< Introduced in v1.10 + + // Depth formats + OVR_FORMAT_D16_UNORM = 11, + OVR_FORMAT_D24_UNORM_S8_UINT = 12, + OVR_FORMAT_D32_FLOAT = 13, + OVR_FORMAT_D32_FLOAT_S8X24_UINT = 14, + + // Added in 1.5 compressed formats can be used for static layers + OVR_FORMAT_BC1_UNORM = 15, + OVR_FORMAT_BC1_UNORM_SRGB = 16, + OVR_FORMAT_BC2_UNORM = 17, + OVR_FORMAT_BC2_UNORM_SRGB = 18, + OVR_FORMAT_BC3_UNORM = 19, + OVR_FORMAT_BC3_UNORM_SRGB = 20, + OVR_FORMAT_BC6H_UF16 = 21, + OVR_FORMAT_BC6H_SF16 = 22, + OVR_FORMAT_BC7_UNORM = 23, + OVR_FORMAT_BC7_UNORM_SRGB = 24, + + + OVR_FORMAT_ENUMSIZE = 0x7fffffff ///< \internal Force type int32_t. +} ovrTextureFormat; + +/// Misc flags overriding particular +/// behaviors of a texture swap chain +/// +/// \see ovrTextureSwapChainDesc +/// +typedef enum ovrTextureMiscFlags_ { + ovrTextureMisc_None, + + /// Vulkan and DX only: The underlying texture is created with a TYPELESS equivalent + /// of the format specified in the texture desc. The SDK will still access the + /// texture using the format specified in the texture desc, but the app can + /// create views with different formats if this is specified. + ovrTextureMisc_DX_Typeless = 0x0001, + + /// DX only: Allow generation of the mip chain on the GPU via the GenerateMips + /// call. This flag requires that RenderTarget binding also be specified. + ovrTextureMisc_AllowGenerateMips = 0x0002, + + /// Texture swap chain contains protected content, and requires + /// HDCP connection in order to display to HMD. Also prevents + /// mirroring or other redirection of any frame containing this contents + ovrTextureMisc_ProtectedContent = 0x0004, + + /// Automatically generate and use the mip chain in composition on each submission. + /// Mips are regenerated from highest quality level, ignoring other pre-existing mip levels. + /// Not supported for depth or compressed (BC) formats. + ovrTextureMisc_AutoGenerateMips = 0x0008, + + ovrTextureMisc_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTextureFlags; + +/// Description used to create a texture swap chain. +/// +/// \see ovr_CreateTextureSwapChainDX +/// \see ovr_CreateTextureSwapChainGL +/// +typedef struct ovrTextureSwapChainDesc_ { + ovrTextureType Type; ///< Must not be ovrTexture_Window + ovrTextureFormat Format; + int ArraySize; ///< Must be 6 for ovrTexture_Cube, 1 for other types. + int Width; + int Height; + int MipLevels; + int SampleCount; + ovrBool StaticImage; ///< Not buffered in a chain. For images that don't change + OVR_ALIGNAS(4) unsigned int MiscFlags; ///< ovrTextureFlags + OVR_ALIGNAS(4) unsigned int BindFlags; ///< ovrTextureBindFlags. Not used for GL. +} ovrTextureSwapChainDesc; + +/// Bit flags used as part of ovrMirrorTextureDesc's MirrorOptions field. +/// +/// \see ovr_CreateMirrorTextureWithOptionsDX +/// \see ovr_CreateMirrorTextureWithOptionsGL +/// \see ovr_CreateMirrorTextureWithOptionsVk +/// +typedef enum ovrMirrorOptions_ { + /// By default the mirror texture will be: + /// * Pre-distortion (i.e. 
rectilinear) + /// * Contain both eye textures + /// * Exclude Guardian, Notifications, System Menu GUI + ovrMirrorOption_Default = 0x0000, + + /// Retrieves the barrel distorted texture contents instead of the rectilinear one + /// This is only recommended for debugging purposes, and not for final desktop presentation + ovrMirrorOption_PostDistortion = 0x0001, + + /// Since ovrMirrorOption_Default renders both eyes into the mirror texture, + /// these two flags are exclusive (i.e. cannot use them simultaneously) + ovrMirrorOption_LeftEyeOnly = 0x0002, + ovrMirrorOption_RightEyeOnly = 0x0004, + + /// Shows the boundary system aka Guardian on the mirror texture + ovrMirrorOption_IncludeGuardian = 0x0008, + + /// Shows system notifications the user receives on the mirror texture + ovrMirrorOption_IncludeNotifications = 0x0010, + + /// Shows the system menu (triggered by hitting the Home button) on the mirror texture + ovrMirrorOption_IncludeSystemGui = 0x0020, + + + ovrMirrorOption_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrMirrorOptions; + +/// Description used to create a mirror texture. +/// +/// \see ovr_CreateMirrorTextureWithOptionsDX +/// \see ovr_CreateMirrorTextureWithOptionsGL +/// \see ovr_CreateMirrorTextureWithOptionsVk +/// +typedef struct ovrMirrorTextureDesc_ { + ovrTextureFormat Format; + int Width; + int Height; + unsigned int MiscFlags; ///< ovrTextureFlags + unsigned int MirrorOptions; ///< ovrMirrorOptions +} ovrMirrorTextureDesc; + +typedef struct ovrTextureSwapChainData* ovrTextureSwapChain; +typedef struct ovrMirrorTextureData* ovrMirrorTexture; + +//----------------------------------------------------------------------------------- + +/// Describes button input types. +/// Button inputs are combined; that is they will be reported as pressed if they are +/// pressed on either one of the two devices. +/// The ovrButton_Up/Down/Left/Right map to both XBox D-Pad and directional buttons. +/// The ovrButton_Enter and ovrButton_Return map to Start and Back controller buttons, respectively. +typedef enum ovrButton_ { + /// A button on XBox controllers and right Touch controller. Select button on Oculus Remote. + ovrButton_A = 0x00000001, + + /// B button on XBox controllers and right Touch controller. Back button on Oculus Remote. + ovrButton_B = 0x00000002, + + /// Right thumbstick on XBox controllers and Touch controllers. Not present on Oculus Remote. + ovrButton_RThumb = 0x00000004, + + /// Right shoulder button on XBox controllers. Not present on Touch controllers or Oculus Remote. + ovrButton_RShoulder = 0x00000008, + + + /// X button on XBox controllers and left Touch controller. Not present on Oculus Remote. + ovrButton_X = 0x00000100, + + /// Y button on XBox controllers and left Touch controller. Not present on Oculus Remote. + ovrButton_Y = 0x00000200, + + /// Left thumbstick on XBox controllers and Touch controllers. Not present on Oculus Remote. + ovrButton_LThumb = 0x00000400, + + /// Left shoulder button on XBox controllers. Not present on Touch controllers or Oculus Remote. + ovrButton_LShoulder = 0x00000800, + + /// Up button on XBox controllers and Oculus Remote. Not present on Touch controllers. + ovrButton_Up = 0x00010000, + + /// Down button on XBox controllers and Oculus Remote. Not present on Touch controllers. + ovrButton_Down = 0x00020000, + + /// Left button on XBox controllers and Oculus Remote. Not present on Touch controllers. + ovrButton_Left = 0x00040000, + + /// Right button on XBox controllers and Oculus Remote. 
Not present on Touch controllers. + ovrButton_Right = 0x00080000, + + /// Start on XBox 360 controller. Menu on XBox One controller and Left Touch controller. + /// Should be referred to as the Menu button in user-facing documentation. + ovrButton_Enter = 0x00100000, + + /// Back on Xbox 360 controller. View button on XBox One controller. Not present on Touch + /// controllers or Oculus Remote. + ovrButton_Back = 0x00200000, + + /// Volume button on Oculus Remote. Not present on XBox or Touch controllers. + ovrButton_VolUp = 0x00400000, + + /// Volume button on Oculus Remote. Not present on XBox or Touch controllers. + ovrButton_VolDown = 0x00800000, + + /// Home button on XBox controllers. Oculus button on Touch controllers and Oculus Remote. + ovrButton_Home = 0x01000000, + + // Bit mask of all buttons that are for private usage by Oculus + ovrButton_Private = ovrButton_VolUp | ovrButton_VolDown | ovrButton_Home, + + // Bit mask of all buttons on the right Touch controller + ovrButton_RMask = ovrButton_A | ovrButton_B | ovrButton_RThumb | ovrButton_RShoulder, + + // Bit mask of all buttons on the left Touch controller + ovrButton_LMask = + ovrButton_X | ovrButton_Y | ovrButton_LThumb | ovrButton_LShoulder | ovrButton_Enter, + + ovrButton_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrButton; + +/// Describes touch input types. +/// These values map to capacitive touch values reported ovrInputState::Touch. +/// Some of these values are mapped to button bits for consistency. +typedef enum ovrTouch_ { + ovrTouch_A = ovrButton_A, + ovrTouch_B = ovrButton_B, + ovrTouch_RThumb = ovrButton_RThumb, + ovrTouch_RThumbRest = 0x00000008, + ovrTouch_RIndexTrigger = 0x00000010, + + // Bit mask of all the button touches on the right controller + ovrTouch_RButtonMask = + ovrTouch_A | ovrTouch_B | ovrTouch_RThumb | ovrTouch_RThumbRest | ovrTouch_RIndexTrigger, + + ovrTouch_X = ovrButton_X, + ovrTouch_Y = ovrButton_Y, + ovrTouch_LThumb = ovrButton_LThumb, + ovrTouch_LThumbRest = 0x00000800, + ovrTouch_LIndexTrigger = 0x00001000, + + // Bit mask of all the button touches on the left controller + ovrTouch_LButtonMask = + ovrTouch_X | ovrTouch_Y | ovrTouch_LThumb | ovrTouch_LThumbRest | ovrTouch_LIndexTrigger, + + // Finger pose state + // Derived internally based on distance, proximity to sensors and filtering. + ovrTouch_RIndexPointing = 0x00000020, + ovrTouch_RThumbUp = 0x00000040, + ovrTouch_LIndexPointing = 0x00002000, + ovrTouch_LThumbUp = 0x00004000, + + // Bit mask of all right controller poses + ovrTouch_RPoseMask = ovrTouch_RIndexPointing | ovrTouch_RThumbUp, + + // Bit mask of all left controller poses + ovrTouch_LPoseMask = ovrTouch_LIndexPointing | ovrTouch_LThumbUp, + + ovrTouch_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrTouch; + +/// Describes the Touch Haptics engine. +/// Currently, those values will NOT change during a session. 
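ovrButton and ovrTouch values are bit masks, so controller state is queried with bitwise AND against the Buttons and Touches words (they live in ovrInputState, declared further down in this header). A small sketch with illustrative helper names:

/* Illustrative helpers; Buttons/Touches come from ovrInputState (declared below). */
static int IsMenuPressed(unsigned int buttons) {
  /* ovrButton_Enter is the Menu button on Xbox One and left Touch controllers. */
  return (buttons & ovrButton_Enter) != 0;
}

static int IsRightControllerTouched(unsigned int touches) {
  /* Any capacitive-touch bit belonging to the right Touch controller. */
  return (touches & ovrTouch_RButtonMask) != 0;
}

static int IsLeftThumbUp(unsigned int touches) {
  /* Derived finger-pose bit, see ovrTouch_LPoseMask above. */
  return (touches & ovrTouch_LThumbUp) != 0;
}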
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTouchHapticsDesc_ { + // Haptics engine frequency/sample-rate, sample time in seconds equals 1.0/sampleRateHz + int SampleRateHz; + // Size of each Haptics sample, sample value range is [0, 2^(Bytes*8)-1] + int SampleSizeInBytes; + + // Queue size that would guarantee Haptics engine would not starve for data + // Make sure size doesn't drop below it for best results + int QueueMinSizeToAvoidStarvation; + + // Minimum, Maximum and Optimal number of samples that can be sent to Haptics through + // ovr_SubmitControllerVibration + int SubmitMinSamples; + int SubmitMaxSamples; + int SubmitOptimalSamples; +} ovrTouchHapticsDesc; + +/// Specifies which controller is connected; multiple can be connected at once. +typedef enum ovrControllerType_ { + ovrControllerType_None = 0x0000, + ovrControllerType_LTouch = 0x0001, + ovrControllerType_RTouch = 0x0002, + ovrControllerType_Touch = (ovrControllerType_LTouch | ovrControllerType_RTouch), + ovrControllerType_Remote = 0x0004, + + ovrControllerType_XBox = 0x0010, + + ovrControllerType_Object0 = 0x0100, + ovrControllerType_Object1 = 0x0200, + ovrControllerType_Object2 = 0x0400, + ovrControllerType_Object3 = 0x0800, + + ovrControllerType_Active = 0xffffffff, ///< Operate on or query whichever controller is active. + + ovrControllerType_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrControllerType; + +/// Haptics buffer submit mode +typedef enum ovrHapticsBufferSubmitMode_ { + /// Enqueue buffer for later playback + ovrHapticsBufferSubmit_Enqueue +} ovrHapticsBufferSubmitMode; + +/// Maximum number of samples in ovrHapticsBuffer +#define OVR_HAPTICS_BUFFER_SAMPLES_MAX 256 + +/// Haptics buffer descriptor, contains amplitude samples used for Touch vibration +typedef struct ovrHapticsBuffer_ { + /// Samples stored in opaque format + const void* Samples; + /// Number of samples (up to OVR_HAPTICS_BUFFER_SAMPLES_MAX) + int SamplesCount; + /// How samples are submitted to the hardware + ovrHapticsBufferSubmitMode SubmitMode; +} ovrHapticsBuffer; + +/// State of the Haptics playback for Touch vibration +typedef struct ovrHapticsPlaybackState_ { + // Remaining space available to queue more samples + int RemainingQueueSpace; + + // Number of samples currently queued + int SamplesQueued; +} ovrHapticsPlaybackState; + +/// Position tracked devices +typedef enum ovrTrackedDeviceType_ { + ovrTrackedDevice_None = 0x0000, + ovrTrackedDevice_HMD = 0x0001, + ovrTrackedDevice_LTouch = 0x0002, + ovrTrackedDevice_RTouch = 0x0004, + ovrTrackedDevice_Touch = (ovrTrackedDevice_LTouch | ovrTrackedDevice_RTouch), + + ovrTrackedDevice_Object0 = 0x0010, + ovrTrackedDevice_Object1 = 0x0020, + ovrTrackedDevice_Object2 = 0x0040, + ovrTrackedDevice_Object3 = 0x0080, + + ovrTrackedDevice_All = 0xFFFF, +} ovrTrackedDeviceType; + +/// Boundary types that specified while using the boundary system +typedef enum ovrBoundaryType_ { + /// Outer boundary - closely represents user setup walls + ovrBoundary_Outer = 0x0001, + + /// Play area - safe rectangular area inside outer boundary which can optionally be used to + /// restrict user interactions and motion. + ovrBoundary_PlayArea = 0x0100, +} ovrBoundaryType; + +/// Boundary system look and feel +typedef struct ovrBoundaryLookAndFeel_ { + /// Boundary color (alpha channel is ignored) + ovrColorf Color; +} ovrBoundaryLookAndFeel; + +/// Provides boundary test information +typedef struct ovrBoundaryTestResult_ { + /// True if the boundary system is being triggered. 
Note that due to fade in/out effects this may + /// not exactly match visibility. + ovrBool IsTriggering; + + /// Distance to the closest play area or outer boundary surface. + float ClosestDistance; + + /// Closest point on the boundary surface. + ovrVector3f ClosestPoint; + + /// Unit surface normal of the closest boundary surface. + ovrVector3f ClosestPointNormal; +} ovrBoundaryTestResult; + +/// Provides names for the left and right hand array indexes. +/// +/// \see ovrInputState, ovrTrackingState +/// +typedef enum ovrHandType_ { + ovrHand_Left = 0, + ovrHand_Right = 1, + ovrHand_Count = 2, + ovrHand_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrHandType; + +/// ovrInputState describes the complete controller input state, including Oculus Touch, +/// and XBox gamepad. If multiple inputs are connected and used at the same time, +/// their inputs are combined. +typedef struct ovrInputState_ { + /// System type when the controller state was last updated. + double TimeInSeconds; + + /// Values for buttons described by ovrButton. + unsigned int Buttons; + + /// Touch values for buttons and sensors as described by ovrTouch. + unsigned int Touches; + + /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f. + /// Returns 0 if the value would otherwise be less than 0.1176, for ovrControllerType_XBox. + /// This has been formally named simply "Trigger". We retain the name IndexTrigger for backwards + /// code compatibility. + /// User-facing documentation should refer to it as the Trigger. + float IndexTrigger[ovrHand_Count]; + + /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f. + /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code + /// compatibility. + /// User-facing documentation should refer to it as the Grip Button or simply Grip. + float HandTrigger[ovrHand_Count]; + + /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range + /// of -1.0f to 1.0f. + /// Returns a deadzone (value 0) per each axis if the value on that axis would otherwise have been + /// between -.2746 to +.2746, for ovrControllerType_XBox + ovrVector2f Thumbstick[ovrHand_Count]; + + /// The type of the controller this state is for. + ovrControllerType ControllerType; + + /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f. + /// Does not apply a deadzone. Only touch applies a filter. + /// This has been formally named simply "Trigger". We retain the name IndexTrigger for backwards + /// code compatibility. + /// User-facing documentation should refer to it as the Trigger. + float IndexTriggerNoDeadzone[ovrHand_Count]; + + /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f. + /// Does not apply a deadzone. Only touch applies a filter. + /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code + /// compatibility. + /// User-facing documentation should refer to it as the Grip Button or simply Grip. + float HandTriggerNoDeadzone[ovrHand_Count]; + + /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range + /// -1.0f to 1.0f + /// Does not apply a deadzone or filter. + ovrVector2f ThumbstickNoDeadzone[ovrHand_Count]; + + /// Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in range 0.0 to 1.0f. 
+ /// No deadzone or filter + /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code + /// compatibility. + /// User-facing documentation should refer to it as the Grip Button or simply Grip. + float IndexTriggerRaw[ovrHand_Count]; + + /// Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f. + /// No deadzone or filter + /// This has been formally named "Grip Button". We retain the name HandTrigger for backwards code + /// compatibility. + /// User-facing documentation should refer to it as the Grip Button or simply Grip. + float HandTriggerRaw[ovrHand_Count]; + + /// Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range + /// -1.0f to 1.0f + /// No deadzone or filter + ovrVector2f ThumbstickRaw[ovrHand_Count]; +} ovrInputState; + +typedef struct ovrCameraIntrinsics_ { + /// Time in seconds from last change to the parameters + double LastChangedTime; + + /// Angles of all 4 sides of viewport + ovrFovPort FOVPort; + + /// Near plane of the virtual camera used to match the external camera + float VirtualNearPlaneDistanceMeters; + + /// Far plane of the virtual camera used to match the external camera + float VirtualFarPlaneDistanceMeters; + + /// Height in pixels of image sensor + ovrSizei ImageSensorPixelResolution; + + /// The lens distortion matrix of camera + ovrMatrix4f LensDistortionMatrix; + + /// How often, in seconds, the exposure is taken + double ExposurePeriodSeconds; + + /// length of the exposure time + double ExposureDurationSeconds; + +} ovrCameraIntrinsics; + +typedef enum ovrCameraStatusFlags_ { + /// Initial state of camera + ovrCameraStatus_None = 0x0, + + /// Bit set when the camera is connected to the system + ovrCameraStatus_Connected = 0x1, + + /// Bit set when the camera is undergoing calibration + ovrCameraStatus_Calibrating = 0x2, + + /// Bit set when the camera has tried & failed calibration + ovrCameraStatus_CalibrationFailed = 0x4, + + /// Bit set when the camera has tried & passed calibration + ovrCameraStatus_Calibrated = 0x8, + + /// Bit set when the camera is capturing + ovrCameraStatus_Capturing = 0x10, + + ovrCameraStatus_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrCameraStatusFlags; + +typedef struct ovrCameraExtrinsics_ { + /// Time in seconds from last change to the parameters. + /// For instance, if the pose changes, or a camera exposure happens, this struct will be updated. + double LastChangedTimeSeconds; + + /// Current Status of the camera, a mix of bits from ovrCameraStatusFlags + unsigned int CameraStatusFlags; + + /// Which Tracked device, if any, is the camera rigidly attached to + /// If set to ovrTrackedDevice_None, then the camera is not attached to a tracked object. + /// If the external camera moves while unattached (i.e. set to ovrTrackedDevice_None), its Pose + /// won't be updated + ovrTrackedDeviceType AttachedToDevice; + + /// The relative Pose of the External Camera. 
+ /// If AttachedToDevice is ovrTrackedDevice_None, then this is a absolute pose in tracking space + ovrPosef RelativePose; + + /// The time, in seconds, when the last successful exposure was taken + double LastExposureTimeSeconds; + + /// Estimated exposure latency to get from the exposure time to the system + double ExposureLatencySeconds; + + /// Additional latency to get from the exposure time of the real camera to match the render time + /// of the virtual camera + double AdditionalLatencySeconds; + +} ovrCameraExtrinsics; +#define OVR_MAX_EXTERNAL_CAMERA_COUNT 16 +#define OVR_EXTERNAL_CAMERA_NAME_SIZE 32 +typedef struct ovrExternalCamera_ { + char Name[OVR_EXTERNAL_CAMERA_NAME_SIZE]; // camera identifier: vid + pid + serial number etc. + ovrCameraIntrinsics Intrinsics; + ovrCameraExtrinsics Extrinsics; +} ovrExternalCamera; + +//----------------------------------------------------------------------------------- +// ***** Initialize structures + +/// Initialization flags. +/// +/// \see ovrInitParams, ovr_Initialize +/// +typedef enum ovrInitFlags_ { + /// When a debug library is requested, a slower debugging version of the library will + /// run which can be used to help solve problems in the library and debug application code. + ovrInit_Debug = 0x00000001, + + + /// When a version is requested, the LibOVR runtime respects the RequestedMinorVersion + /// field and verifies that the RequestedMinorVersion is supported. Normally when you + /// specify this flag you simply use OVR_MINOR_VERSION for ovrInitParams::RequestedMinorVersion, + /// though you could use a lower version than OVR_MINOR_VERSION to specify previous + /// version behavior. + ovrInit_RequestVersion = 0x00000004, + + + /// This client will not be visible in the HMD. + /// Typically set by diagnostic or debugging utilities. + ovrInit_Invisible = 0x00000010, + + /// This client will alternate between VR and 2D rendering. + /// Typically set by game engine editors and VR-enabled web browsers. + ovrInit_MixedRendering = 0x00000020, + + /// This client is aware of ovrSessionStatus focus states (e.g. ovrSessionStatus::HasInputFocus), + /// and responds to them appropriately (e.g. pauses and stops drawing hands when lacking focus). + ovrInit_FocusAware = 0x00000040, + + + + + + /// These bits are writable by user code. + ovrinit_WritableBits = 0x00ffffff, + + ovrInit_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrInitFlags; + +/// Logging levels +/// +/// \see ovrInitParams, ovrLogCallback +/// +typedef enum ovrLogLevel_ { + ovrLogLevel_Debug = 0, ///< Debug-level log event. + ovrLogLevel_Info = 1, ///< Info-level log event. + ovrLogLevel_Error = 2, ///< Error-level log event. + + ovrLogLevel_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrLogLevel; + +/// Signature of the logging callback function pointer type. +/// +/// \param[in] userData is an arbitrary value specified by the user of ovrInitParams. +/// \param[in] level is one of the ovrLogLevel constants. +/// \param[in] message is a UTF8-encoded null-terminated string. +/// \see ovrInitParams, ovrLogLevel, ovr_Initialize +/// +typedef void(OVR_CDECL* ovrLogCallback)(uintptr_t userData, int level, const char* message); + +/// Parameters for ovr_Initialize. +/// +/// \see ovr_Initialize +/// +typedef struct OVR_ALIGNAS(8) ovrInitParams_ { + /// Flags from ovrInitFlags to override default behavior. + /// Use 0 for the defaults. + uint32_t Flags; + + /// Requests a specific minor version of the LibOVR runtime. 
+ /// Flags must include ovrInit_RequestVersion or this will be ignored and OVR_MINOR_VERSION + /// will be used. If you are directly calling the LibOVRRT version of ovr_Initialize + /// in the LibOVRRT DLL then this must be valid and include ovrInit_RequestVersion. + uint32_t RequestedMinorVersion; + + /// User-supplied log callback function, which may be called at any time + /// asynchronously from multiple threads until ovr_Shutdown completes. + /// Use NULL to specify no log callback. + ovrLogCallback LogCallback; + + /// User-supplied data which is passed as-is to LogCallback. Typically this + /// is used to store an application-specific pointer which is read in the + /// callback function. + uintptr_t UserData; + + /// Relative number of milliseconds to wait for a connection to the server + /// before failing. Use 0 for the default timeout. + uint32_t ConnectionTimeoutMS; + + OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal + +} ovrInitParams; + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(OVR_EXPORTING_CAPI) + +// ----------------------------------------------------------------------------------- +// ***** API Interfaces + +/// Initializes LibOVR +/// +/// Initialize LibOVR for application usage. This includes finding and loading the LibOVRRT +/// shared library. No LibOVR API functions, other than ovr_GetLastErrorInfo and ovr_Detect, can +/// be called unless ovr_Initialize succeeds. A successful call to ovr_Initialize must be eventually +/// followed by a call to ovr_Shutdown. ovr_Initialize calls are idempotent. +/// Calling ovr_Initialize twice does not require two matching calls to ovr_Shutdown. +/// If already initialized, the return value is ovr_Success. +/// +/// LibOVRRT shared library search order: +/// -# Current working directory (often the same as the application directory). +/// -# Module directory (usually the same as the application directory, +/// but not if the module is a separate shared library). +/// -# Application directory +/// -# Development directory (only if OVR_ENABLE_DEVELOPER_SEARCH is enabled, +/// which is off by default). +/// -# Standard OS shared library search location(s) (OS-specific). +/// +/// \param params Specifies custom initialization options. May be NULL to indicate default options +/// when using the CAPI shim. If you are directly calling the LibOVRRT version of +/// ovr_Initialize in the LibOVRRT DLL then this must be valid and +/// include ovrInit_RequestVersion. +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. Example failed results include: +/// - ovrError_Initialize: Generic initialization error. +/// - ovrError_LibLoad: Couldn't load LibOVRRT. +/// - ovrError_LibVersion: LibOVRRT version incompatibility. +/// - ovrError_ServiceConnection: Couldn't connect to the OVR Service. +/// - ovrError_ServiceVersion: OVR Service version incompatibility. +/// - ovrError_IncompatibleOS: The operating system version is incompatible. +/// - ovrError_DisplayInit: Unable to initialize the HMD display. +/// - ovrError_ServerStart: Unable to start the server. Is it already running? +/// - ovrError_Reinitialization: Attempted to re-initialize with a different version. 
+/// +/// Example code +/// \code{.cpp} +/// ovrInitParams initParams = { ovrInit_RequestVersion, OVR_MINOR_VERSION, NULL, 0, 0 }; +/// ovrResult result = ovr_Initialize(&initParams); +/// if(OVR_FAILURE(result)) { +/// ovrErrorInfo errorInfo; +/// ovr_GetLastErrorInfo(&errorInfo); +/// DebugLog("ovr_Initialize failed: %s", errorInfo.ErrorString); +/// return false; +/// } +/// [...] +/// \endcode +/// +/// \see ovr_Shutdown +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* params); + +/// Shuts down LibOVR +/// +/// A successful call to ovr_Initialize must be eventually matched by a call to ovr_Shutdown. +/// After calling ovr_Shutdown, no LibOVR functions can be called except ovr_GetLastErrorInfo +/// or another ovr_Initialize. ovr_Shutdown invalidates all pointers, references, and created +/// objects +/// previously returned by LibOVR functions. The LibOVRRT shared library can be unloaded by +/// ovr_Shutdown. +/// +/// \see ovr_Initialize +/// +OVR_PUBLIC_FUNCTION(void) ovr_Shutdown(); + +/// Returns information about the most recent failed return value by the +/// current thread for this library. +/// +/// This function itself can never generate an error. +/// The last error is never cleared by LibOVR, but will be overwritten by new errors. +/// Do not use this call to determine if there was an error in the last API +/// call as successful API calls don't clear the last ovrErrorInfo. +/// To avoid any inconsistency, ovr_GetLastErrorInfo should be called immediately +/// after an API function that returned a failed ovrResult, with no other API +/// functions called in the interim. +/// +/// \param[out] errorInfo The last ovrErrorInfo for the current thread. +/// +/// \see ovrErrorInfo +/// +OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo); + +/// Returns the version string representing the LibOVRRT version. +/// +/// The returned string pointer is valid until the next call to ovr_Shutdown. +/// +/// Note that the returned version string doesn't necessarily match the current +/// OVR_MAJOR_VERSION, etc., as the returned string refers to the LibOVRRT shared +/// library version and not the locally compiled interface version. +/// +/// The format of this string is subject to change in future versions and its contents +/// should not be interpreted. +/// +/// \return Returns a UTF8-encoded null-terminated version string. +/// +OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString(); + +/// Writes a message string to the LibOVR tracing mechanism (if enabled). +/// +/// This message will be passed back to the application via the ovrLogCallback if +/// it was registered. +/// +/// \param[in] level One of the ovrLogLevel constants. +/// \param[in] message A UTF8-encoded null-terminated string. +/// \return returns the strlen of the message or a negative value if the message is too large. +/// +/// \see ovrLogLevel, ovrLogCallback +/// +OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message); + +/// Identify client application info. +/// +/// The string is one or more newline-delimited lines of optional info +/// indicating engine name, engine version, engine plugin name, engine plugin +/// version, engine editor. The order of the lines is not relevant. Individual +/// lines are optional. A newline is not necessary at the end of the last line. +/// Call after ovr_Initialize and before the first call to ovr_Create. +/// Each value is limited to 20 characters. 
Key names such as 'EngineName:' +/// 'EngineVersion:' do not count towards this limit. +/// +/// \param[in] identity Specifies one or more newline-delimited lines of optional info: +/// EngineName: %s\n +/// EngineVersion: %s\n +/// EnginePluginName: %s\n +/// EnginePluginVersion: %s\n +/// EngineEditor: ('true' or 'false')\n +/// +/// Example code +/// \code{.cpp} +/// ovr_IdentifyClient("EngineName: Unity\n" +/// "EngineVersion: 5.3.3\n" +/// "EnginePluginName: OVRPlugin\n" +/// "EnginePluginVersion: 1.2.0\n" +/// "EngineEditor: true"); +/// \endcode +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_IdentifyClient(const char* identity); + +//------------------------------------------------------------------------------------- +/// @name HMD Management +/// +/// Handles the enumeration, creation, destruction, and properties of an HMD (head-mounted display). +///@{ + +/// Returns information about the current HMD. +/// +/// ovr_Initialize must be called prior to calling this function, +/// otherwise ovrHmdDesc::Type will be set to ovrHmd_None without +/// checking for the HMD presence. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create() or NULL. +/// +/// \return Returns an ovrHmdDesc. If invoked with NULL session argument, ovrHmdDesc::Type +/// set to ovrHmd_None indicates that the HMD is not connected. +/// +OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session); + +/// Returns the number of attached trackers. +/// +/// The number of trackers may change at any time, so this function should be called before use +/// as opposed to once on startup. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \return Returns unsigned int count. +/// +OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session); + +/// Returns a given attached tracker description. +/// +/// ovr_Initialize must have first been called in order for this to succeed, otherwise the returned +/// trackerDescArray will be zero-initialized. The data returned by this function can change at +/// runtime. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \param[in] trackerDescIndex Specifies a tracker index. The valid indexes are in the +/// range of 0 to the tracker count returned by ovr_GetTrackerCount. +/// +/// \return Returns ovrTrackerDesc. An empty ovrTrackerDesc will be returned if +/// trackerDescIndex is out of range. +/// +/// \see ovrTrackerDesc, ovr_GetTrackerCount +/// +OVR_PUBLIC_FUNCTION(ovrTrackerDesc) +ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex); + +/// Creates a handle to a VR session. +/// +/// Upon success the returned ovrSession must be eventually freed with ovr_Destroy when it is no +/// longer needed. +/// A second call to ovr_Create will result in an error return value if the previous session has not +/// been destroyed. +/// +/// \param[out] pSession Provides a pointer to an ovrSession which will be written to upon success. +/// \param[out] pLuid Provides a system specific graphics adapter identifier that locates which +/// graphics adapter has the HMD attached. This must match the adapter used by the application +/// or no rendering output will be possible. This is important for stability on multi-adapter +/// systems. An +/// application that simply chooses the default adapter will not run reliably on multi-adapter +/// systems. +/// \return Returns an ovrResult indicating success or failure. 
Upon failure +/// the returned ovrSession will be NULL. +/// +/// Example code +/// \code{.cpp} +/// ovrSession session; +/// ovrGraphicsLuid luid; +/// ovrResult result = ovr_Create(&session, &luid); +/// if(OVR_FAILURE(result)) +/// ... +/// \endcode +/// +/// \see ovr_Destroy +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid); + +/// Destroys the session. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \see ovr_Create +/// +OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session); + +#endif // !defined(OVR_EXPORTING_CAPI) + +/// Specifies status information for the current session. +/// +/// \see ovr_GetSessionStatus +/// +typedef struct ovrSessionStatus_ { + /// True if the process has VR focus and thus is visible in the HMD. + ovrBool IsVisible; + + /// True if an HMD is present. + ovrBool HmdPresent; + + /// True if the HMD is on the user's head. + ovrBool HmdMounted; + + /// True if the session is in a display-lost state. See ovr_SubmitFrame. + ovrBool DisplayLost; + + /// True if the application should initiate shutdown. + ovrBool ShouldQuit; + + /// True if UX has requested re-centering. Must call ovr_ClearShouldRecenterFlag, + /// ovr_RecenterTrackingOrigin or ovr_SpecifyTrackingOrigin. + ovrBool ShouldRecenter; + + /// True if the application is the foreground application and receives input (e.g. Touch + /// controller state). If this is false then the application is in the background (but possibly + /// still visible) should hide any input representations such as hands. + ovrBool HasInputFocus; + + /// True if a system overlay is present, such as a dashboard. In this case the application + /// (if visible) should pause while still drawing, avoid drawing near-field graphics so they + /// don't visually fight with the system overlay, and consume fewer CPU and GPU resources. + ovrBool OverlayPresent; + + /// True if runtime is requesting that the application provide depth buffers with projection + /// layers. + ovrBool DepthRequested; + +} ovrSessionStatus; + +#if !defined(OVR_EXPORTING_CAPI) + +/// Returns status information for the application. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[out] sessionStatus Provides an ovrSessionStatus that is filled in. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of +/// failure, use ovr_GetLastErrorInfo to get more information. +/// Return values include but aren't limited to: +/// - ovrSuccess: Completed successfully. +/// - ovrError_ServiceConnection: The service connection was lost and the application +/// must destroy the session. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus); + + +/// Query extension support status. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] extension Extension to query. +/// \param[out] outExtensionSupported Set to extension support status. ovrTrue if supported. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of +/// failure use ovr_GetLastErrorInfo to get more information. +/// +/// \see ovrExtensions +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_IsExtensionSupported( + ovrSession session, + ovrExtensions extension, + ovrBool* outExtensionSupported); + +/// Enable extension. Extensions must be enabled after ovr_Create is called. 
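+///
+/// Example code (a minimal sketch; the extension constant
+/// ovrExtension_TextureLayout_Octilinear is assumed here purely for illustration,
+/// substitute whichever ovrExtensions value the application actually needs):
+/// \code{.cpp}
+///   ovrBool extSupported = ovrFalse;
+///   // Assumed extension constant; query support before attempting to enable.
+///   if (OVR_SUCCESS(ovr_IsExtensionSupported(
+///           session, ovrExtension_TextureLayout_Octilinear, &extSupported)) &&
+///       extSupported) {
+///     ovr_EnableExtension(session, ovrExtension_TextureLayout_Octilinear);
+///   }
+/// \endcode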
+/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] extension Extension to enable. +/// +/// \return Returns an ovrResult indicating success or failure. Extension is only +/// enabled if successful. In the case of failure use ovr_GetLastErrorInfo +/// to get more information. +/// +/// \see ovrExtensions +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_EnableExtension(ovrSession session, ovrExtensions extension); + +//@} + +//------------------------------------------------------------------------------------- +/// @name Tracking +/// +/// Tracking functions handle the position, orientation, and movement of the HMD in space. +/// +/// All tracking interface functions are thread-safe, allowing tracking state to be sampled +/// from different threads. +/// +///@{ + + +/// Sets the tracking origin type +/// +/// When the tracking origin is changed, all of the calls that either provide +/// or accept ovrPosef will use the new tracking origin provided. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] origin Specifies an ovrTrackingOrigin to be used for all ovrPosef +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \see ovrTrackingOrigin, ovr_GetTrackingOriginType +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin); + +/// Gets the tracking origin state +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \return Returns the ovrTrackingOrigin that was either set by default, or previous set by the +/// application. +/// +/// \see ovrTrackingOrigin, ovr_SetTrackingOriginType +OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session); + +/// Re-centers the sensor position and orientation. +/// +/// This resets the (x,y,z) positional components and the yaw orientation component of the +/// tracking space for the HMD and controllers using the HMD's current tracking pose. +/// If the caller requires some tweaks on top of the HMD's current tracking pose, consider using +/// ovr_SpecifyTrackingOrigin instead. +/// +/// The roll and pitch orientation components are always determined by gravity and cannot +/// be redefined. All future tracking will report values relative to this new reference position. +/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after +/// this, because the sensor position(s) will change as a result of this. +/// +/// The headset cannot be facing vertically upward or downward but rather must be roughly +/// level otherwise this function will fail with ovrError_InvalidHeadsetOrientation. +/// +/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how +/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. Return values include but aren't limited +/// to: +/// - ovrSuccess: Completed successfully. +/// - ovrError_InvalidHeadsetOrientation: The headset was facing an invalid direction when +/// attempting recentering, such as facing vertically. 
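+///
+/// Example code (a minimal sketch; how the application reacts to a failed recenter
+/// is its own choice):
+/// \code{.cpp}
+///   ovrSessionStatus sessionStatus;
+///   ovr_GetSessionStatus(session, &sessionStatus);
+///   if (sessionStatus.ShouldRecenter) {
+///     if (OVR_FAILURE(ovr_RecenterTrackingOrigin(session))) {
+///       // Recenter failed (e.g. headset facing vertically); acknowledge the request
+///       // so it is not reported again until the user asks once more.
+///       ovr_ClearShouldRecenterFlag(session);
+///     }
+///   }
+/// \endcode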
+/// +/// \see ovrTrackingOrigin, ovr_GetTrackerPose, ovr_SpecifyTrackingOrigin +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session); + +/// Allows manually tweaking the sensor position and orientation. +/// +/// This function is similar to ovr_RecenterTrackingOrigin in that it modifies the +/// (x,y,z) positional components and the yaw orientation component of the tracking space for +/// the HMD and controllers. +/// +/// While ovr_RecenterTrackingOrigin resets the tracking origin in reference to the HMD's +/// current pose, ovr_SpecifyTrackingOrigin allows the caller to explicitly specify a transform +/// for the tracking origin. This transform is expected to be an offset to the most recent +/// recentered origin, so calling this function repeatedly with the same originPose will keep +/// nudging the recentered origin in that direction. +/// +/// There are several use cases for this function. For example, if the application decides to +/// limit the yaw, or translation of the recentered pose instead of directly using the HMD pose +/// the application can query the current tracking state via ovr_GetTrackingState, and apply +/// some limitations to the HMD pose because feeding this pose back into this function. +/// Similarly, this can be used to "adjust the seating position" incrementally in apps that +/// feature seated experiences such as cockpit-based games. +/// +/// This function can emulate ovr_RecenterTrackingOrigin as such: +/// ovrTrackingState ts = ovr_GetTrackingState(session, 0.0, ovrFalse); +/// ovr_SpecifyTrackingOrigin(session, ts.HeadPose.ThePose); +/// +/// The roll and pitch orientation components are determined by gravity and cannot be redefined. +/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after +/// this, because the sensor position(s) will change as a result of this. +/// +/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how +/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] originPose Specifies a pose that will be used to transform the current tracking +/// origin. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. Return values include but aren't limited +/// to: +/// - ovrSuccess: Completed successfully. +/// - ovrError_InvalidParameter: The heading direction in originPose was invalid, +/// such as facing vertically. This can happen if the caller is directly feeding the pose +/// of a position-tracked device such as an HMD or controller into this function. +/// +/// \see ovrTrackingOrigin, ovr_GetTrackerPose, ovr_RecenterTrackingOrigin +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_SpecifyTrackingOrigin(ovrSession session, ovrPosef originPose); + +/// Clears the ShouldRecenter status bit in ovrSessionStatus. +/// +/// Clears the ShouldRecenter status bit in ovrSessionStatus, allowing further recenter requests to +/// be detected. Since this is automatically done by ovr_RecenterTrackingOrigin and +/// ovr_SpecifyTrackingOrigin, this function only needs to be called when application is doing +/// its own re-centering logic. +OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session); + +/// Returns tracking state reading based on the specified absolute system time. 
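+///
+/// A typical per-frame query (see the absTime notes below) might look like the
+/// following sketch; frameIndex is assumed to be the application's own frame counter.
+/// \code{.cpp}
+///   double displayTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+///   ovrTrackingState ts = ovr_GetTrackingState(session, displayTime, ovrTrue);
+///   if (ts.StatusFlags & ovrStatus_OrientationTracked) {
+///     ovrPosef headPose = ts.HeadPose.ThePose;  // predicted head pose at displayTime
+///   }
+/// \endcode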
+/// +/// Pass an absTime value of 0.0 to request the most recent sensor reading. In this case +/// both PredictedPose and SamplePose will have the same value. +/// +/// This may also be used for more refined timing of front buffer rendering logic, and so on. +/// This may be called by multiple threads. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] absTime Specifies the absolute future time to predict the return +/// ovrTrackingState value. Use 0 to request the most recent tracking state. +/// \param[in] latencyMarker Specifies that this call is the point in time where +/// the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer +/// provides "SensorSampleTime", that will override the value stored here. +/// \return Returns the ovrTrackingState that is predicted for the given absTime. +/// +/// \see ovrTrackingState, ovr_GetEyePoses, ovr_GetTimeInSeconds +/// +OVR_PUBLIC_FUNCTION(ovrTrackingState) +ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker); + +/// Returns an array of poses, where each pose matches a device type provided by the deviceTypes +/// array parameter. If any pose cannot be retrieved, it will return a reason for the missing +/// pose and the device pose will be zeroed out with a pose quaternion [x=0, y=0, z=0, w=1]. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] deviceTypes Array of device types to query for their poses. +/// \param[in] deviceCount Number of queried poses. This number must match the length of the +/// outDevicePoses and deviceTypes array. +/// \param[in] absTime Specifies the absolute future time to predict the return +/// ovrTrackingState value. Use 0 to request the most recent tracking state. +/// \param[out] outDevicePoses Array of poses, one for each device type in deviceTypes arrays. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and +/// true upon success. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetDevicePoses( + ovrSession session, + ovrTrackedDeviceType* deviceTypes, + int deviceCount, + double absTime, + ovrPoseStatef* outDevicePoses); + + +/// Returns the ovrTrackerPose for the given attached tracker. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] trackerPoseIndex Index of the tracker being requested. +/// +/// \return Returns the requested ovrTrackerPose. An empty ovrTrackerPose will be returned if +/// trackerPoseIndex is out of range. +/// +/// \see ovr_GetTrackerCount +/// +OVR_PUBLIC_FUNCTION(ovrTrackerPose) +ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex); + +/// Returns the most recent input state for controllers, without positional tracking info. +/// +/// \param[out] inputState Input state that will be filled in. +/// \param[in] ovrControllerType Specifies which controller the input will be returned for. +/// \return Returns ovrSuccess if the new state was successfully obtained. +/// +/// \see ovrControllerType +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState); + +/// Returns controller types connected to the system OR'ed together. +/// +/// \return A bitmask of ovrControllerTypes connected to the system. 
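+///
+/// Example code (a minimal sketch):
+/// \code{.cpp}
+///   unsigned int connected = ovr_GetConnectedControllerTypes(session);
+///   if (connected & ovrControllerType_Touch) {
+///     ovrInputState inputState;
+///     if (OVR_SUCCESS(ovr_GetInputState(session, ovrControllerType_Touch, &inputState))) {
+///       float leftIndexTrigger = inputState.IndexTrigger[ovrHand_Left];
+///     }
+///   }
+/// \endcode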
+/// +/// \see ovrControllerType +/// +OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session); + +/// Gets information about Haptics engine for the specified Touch controller. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] controllerType The controller to retrieve the information from. +/// +/// \return Returns an ovrTouchHapticsDesc. +/// +OVR_PUBLIC_FUNCTION(ovrTouchHapticsDesc) +ovr_GetTouchHapticsDesc(ovrSession session, ovrControllerType controllerType); + +/// Sets constant vibration (with specified frequency and amplitude) to a controller. +/// Note: ovr_SetControllerVibration cannot be used interchangeably with +/// ovr_SubmitControllerVibration. +/// +/// This method should be called periodically, vibration lasts for a maximum of 2.5 seconds. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] controllerType The controller to set the vibration to. +/// \param[in] frequency Vibration frequency. Supported values are: 0.0 (disabled), 0.5 and 1.0. Non +/// valid values will be clamped. +/// \param[in] amplitude Vibration amplitude in the [0.0, 1.0] range. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by +/// controllerType is not available. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetControllerVibration( + ovrSession session, + ovrControllerType controllerType, + float frequency, + float amplitude); + +/// Submits a Haptics buffer (used for vibration) to Touch (only) controllers. +/// Note: ovr_SubmitControllerVibration cannot be used interchangeably with +/// ovr_SetControllerVibration. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] controllerType Controller where the Haptics buffer will be played. +/// \param[in] buffer Haptics buffer containing amplitude samples to be played. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by +/// controllerType is not available. +/// +/// \see ovrHapticsBuffer +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SubmitControllerVibration( + ovrSession session, + ovrControllerType controllerType, + const ovrHapticsBuffer* buffer); + +/// Gets the Haptics engine playback state of a specific Touch controller. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] controllerType Controller where the Haptics buffer wil be played. +/// \param[in] outState State of the haptics engine. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by +/// controllerType is not available. 
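+///
+/// Example code (a minimal sketch of the buffered-haptics flow; it assumes
+/// SampleSizeInBytes is 1, so each sample is a single byte of amplitude):
+/// \code{.cpp}
+///   ovrTouchHapticsDesc hapticsDesc = ovr_GetTouchHapticsDesc(session, ovrControllerType_RTouch);
+///   unsigned char samples[OVR_HAPTICS_BUFFER_SAMPLES_MAX];
+///   int count = hapticsDesc.SubmitOptimalSamples;
+///   if (count > OVR_HAPTICS_BUFFER_SAMPLES_MAX)
+///     count = OVR_HAPTICS_BUFFER_SAMPLES_MAX;
+///   for (int i = 0; i < count; ++i)
+///     samples[i] = 255;  // full amplitude
+///
+///   ovrHapticsBuffer buffer;
+///   buffer.Samples = samples;
+///   buffer.SamplesCount = count;
+///   buffer.SubmitMode = ovrHapticsBufferSubmit_Enqueue;
+///   ovr_SubmitControllerVibration(session, ovrControllerType_RTouch, &buffer);
+///
+///   ovrHapticsPlaybackState playbackState;
+///   ovr_GetControllerVibrationState(session, ovrControllerType_RTouch, &playbackState);
+///   // playbackState.SamplesQueued / RemainingQueueSpace can drive when to enqueue more.
+/// \endcode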
+/// +/// \see ovrHapticsPlaybackState +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetControllerVibrationState( + ovrSession session, + ovrControllerType controllerType, + ovrHapticsPlaybackState* outState); + +/// Tests collision/proximity of position tracked devices (e.g. HMD and/or Touch) against the +/// Boundary System. +/// Note: this method is similar to ovr_BoundaryTestPoint but can be more precise as it may take +/// into account device acceleration/momentum. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] deviceBitmask Bitmask of one or more tracked devices to test. +/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea. +/// \param[out] outTestResult Result of collision/proximity test, contains information such as +/// distance and closest point. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due +/// to not being set up. +/// - ovrSuccess_DeviceUnavailable: The call succeeded but the device referred to by +/// deviceBitmask is not available. +/// +/// \see ovrBoundaryTestResult +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_TestBoundary( + ovrSession session, + ovrTrackedDeviceType deviceBitmask, + ovrBoundaryType boundaryType, + ovrBoundaryTestResult* outTestResult); + +/// Tests collision/proximity of a 3D point against the Boundary System. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] point 3D point to test. +/// \param[in] singleBoundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea to test +/// against +/// \param[out] outTestResult Result of collision/proximity test, contains information such as +/// distance and closest point. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due +/// to not being set up. +/// +/// \see ovrBoundaryTestResult +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_TestBoundaryPoint( + ovrSession session, + const ovrVector3f* point, + ovrBoundaryType singleBoundaryType, + ovrBoundaryTestResult* outTestResult); + +/// Sets the look and feel of the Boundary System. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] lookAndFeel Look and feel parameters. +/// \return Returns ovrSuccess upon success. +/// \see ovrBoundaryLookAndFeel +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetBoundaryLookAndFeel(ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel); + +/// Resets the look and feel of the Boundary System to its default state. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \return Returns ovrSuccess upon success. +/// \see ovrBoundaryLookAndFeel +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetBoundaryLookAndFeel(ovrSession session); + +/// Gets the geometry of the Boundary System's "play area" or "outer boundary" as 3D floor points. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. 
+/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea. +/// \param[out] outFloorPoints Array of 3D points (in clockwise order) defining the boundary at +/// floor height (can be NULL to retrieve only the number of points). +/// \param[out] outFloorPointsCount Number of 3D points returned in the array. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due +/// to not being set up. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetBoundaryGeometry( + ovrSession session, + ovrBoundaryType boundaryType, + ovrVector3f* outFloorPoints, + int* outFloorPointsCount); + +/// Gets the dimension of the Boundary System's "play area" or "outer boundary". +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] boundaryType Must be either ovrBoundary_Outer or ovrBoundary_PlayArea. +/// \param[out] outDimensions Dimensions of the axis aligned bounding box that encloses the area in +/// meters (width, height and length). +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: The call succeeded and a result was returned. +/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due +/// to not being set up. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetBoundaryDimensions( + ovrSession session, + ovrBoundaryType boundaryType, + ovrVector3f* outDimensions); + +/// Returns if the boundary is currently visible. +/// Note: visibility is false if the user has turned off boundaries, otherwise, it's true if +/// the app has requested boundaries to be visible or if any tracked device is currently +/// triggering it. This may not exactly match rendering due to fade-in and fade-out effects. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[out] outIsVisible ovrTrue, if the boundary is visible. +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: Result was successful and a result was returned. +/// - ovrSuccess_BoundaryInvalid: The call succeeded but the result is not a valid boundary due +/// to not being set up. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetBoundaryVisible(ovrSession session, ovrBool* outIsVisible); + +/// Requests boundary to be visible. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] visible forces the outer boundary to be visible. An application can't force it +/// to be invisible, but can cancel its request by passing false. +/// \return Returns ovrSuccess upon success. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_RequestBoundaryVisible(ovrSession session, ovrBool visible); + +// ----------------------------------------------------------------------------------- +/// @name Mixed reality capture support +/// +/// Defines functions used for mixed reality capture / third person cameras. +/// + +/// Returns the number of camera properties of all cameras +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in out] cameras Pointer to the array. 
If null and the provided array capacity is +/// sufficient, will return ovrError_NullArrayPointer. +/// \param[in out] inoutCameraCount Supply the +/// array capacity, will return the actual # of cameras defined. If *inoutCameraCount is too small, +/// will return ovrError_InsufficientArraySize. +/// \return Returns the list of external cameras the system knows about. +/// Returns ovrError_NoExternalCameraInfo if there is not any eternal camera information. +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetExternalCameras( + ovrSession session, + ovrExternalCamera* cameras, + unsigned int* inoutCameraCount); + +/// Sets the camera intrinsics and/or extrinsics stored for the cameraName camera +/// Names must be < 32 characters and null-terminated. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] name Specifies which camera to set the intrinsics or extrinsics for. +/// The name must be at most OVR_EXTERNAL_CAMERA_NAME_SIZE - 1 +/// characters. Otherwise, ovrError_ExternalCameraNameWrongSize is returned. +/// \param[in] intrinsics Contains the intrinsic parameters to set, can be null +/// \param[in] extrinsics Contains the extrinsic parameters to set, can be null +/// \return Returns ovrSuccess or an ovrError code +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetExternalCameraProperties( + ovrSession session, + const char* name, + const ovrCameraIntrinsics* const intrinsics, + const ovrCameraExtrinsics* const extrinsics); + +///@} + +#endif // !defined(OVR_EXPORTING_CAPI) + +//------------------------------------------------------------------------------------- +// @name Layers +// +///@{ + +/// Specifies the maximum number of layers supported by ovr_SubmitFrame. +/// +/// /see ovr_SubmitFrame +/// +enum { ovrMaxLayerCount = 16 }; + +/// Describes layer types that can be passed to ovr_SubmitFrame. +/// Each layer type has an associated struct, such as ovrLayerEyeFov. +/// +/// \see ovrLayerHeader +/// +typedef enum ovrLayerType_ { + /// Layer is disabled. + ovrLayerType_Disabled = 0, + + /// Described by ovrLayerEyeFov. + ovrLayerType_EyeFov = 1, + + /// Described by ovrLayerEyeFovDepth. + ovrLayerType_EyeFovDepth = 2, + + /// Described by ovrLayerQuad. Previously called ovrLayerType_QuadInWorld. + ovrLayerType_Quad = 3, + + // enum 4 used to be ovrLayerType_QuadHeadLocked. Instead, use ovrLayerType_Quad with + // ovrLayerFlag_HeadLocked. + + /// Described by ovrLayerEyeMatrix. + ovrLayerType_EyeMatrix = 5, + + + /// Described by ovrLayerEyeFovMultires. + ovrLayerType_EyeFovMultires = 7, + + /// Described by ovrLayerCylinder. + ovrLayerType_Cylinder = 8, + + /// Described by ovrLayerCube + ovrLayerType_Cube = 10, + + + ovrLayerType_EnumSize = 0x7fffffff ///< Force type int32_t. + +} ovrLayerType; + +/// Identifies flags used by ovrLayerHeader and which are passed to ovr_SubmitFrame. +/// +/// \see ovrLayerHeader +/// +typedef enum ovrLayerFlags_ { + /// ovrLayerFlag_HighQuality enables 4x anisotropic sampling during the composition of the layer. + /// The benefits are mostly visible at the periphery for high-frequency & high-contrast visuals. + /// For best results consider combining this flag with an ovrTextureSwapChain that has mipmaps and + /// instead of using arbitrary sized textures, prefer texture sizes that are powers-of-two. + /// Actual rendered viewport and doesn't necessarily have to fill the whole texture. + ovrLayerFlag_HighQuality = 0x01, + + /// ovrLayerFlag_TextureOriginAtBottomLeft: the opposite is TopLeft. 
+ /// Generally this is false for D3D, true for OpenGL. + ovrLayerFlag_TextureOriginAtBottomLeft = 0x02, + + /// Mark this surface as "headlocked", which means it is specified + /// relative to the HMD and moves with it, rather than being specified + /// relative to sensor/torso space and remaining still while the head moves. + /// What used to be ovrLayerType_QuadHeadLocked is now ovrLayerType_Quad plus this flag. + /// However the flag can be applied to any layer type to achieve a similar effect. + ovrLayerFlag_HeadLocked = 0x04, + + +} ovrLayerFlags; + +/// Defines properties shared by all ovrLayer structs, such as ovrLayerEyeFov. +/// +/// ovrLayerHeader is used as a base member in these larger structs. +/// This struct cannot be used by itself except for the case that Type is ovrLayerType_Disabled. +/// +/// \see ovrLayerType, ovrLayerFlags +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerHeader_ { + ovrLayerType Type; ///< Described by ovrLayerType. + unsigned Flags; ///< Described by ovrLayerFlags. +} ovrLayerHeader; + +/// Describes a layer that specifies a monoscopic or stereoscopic view. +/// This is the kind of layer that's typically used as layer 0 to ovr_SubmitFrame, +/// as it is the kind of layer used to render a 3D stereoscopic view. +/// +/// Three options exist with respect to mono/stereo texture usage: +/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, +/// respectively. +/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively. +/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL, +/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0]. +/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and +/// Viewport[1] both refer to that rendering. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFov_ { + /// Header.Type must be ovrLayerType_EyeFov. + ovrLayerHeader Header; + + /// ovrTextureSwapChains for the left and right eye respectively. + /// The second one of which can be NULL for cases described above. + ovrTextureSwapChain ColorTexture[ovrEye_Count]; + + /// Specifies the ColorTexture sub-rect UV coordinates. + /// Both Viewport[0] and Viewport[1] must be valid. + ovrRecti Viewport[ovrEye_Count]; + + /// The viewport field of view. + ovrFovPort Fov[ovrEye_Count]; + + /// Specifies the position and orientation of each eye view, with position specified in meters. + /// RenderPose will typically be the value returned from ovr_CalcEyePoses, + /// but can be different in special cases if a different head pose is used for rendering. + ovrPosef RenderPose[ovrEye_Count]; + + /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose) + /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds + /// around the instant the application calls ovr_GetTrackingState + /// The main purpose for this is to accurately track app tracking latency. + double SensorSampleTime; + +} ovrLayerEyeFov; + +/// Describes a layer that specifies a monoscopic or stereoscopic view, +/// with depth textures in addition to color textures. This is typically used to support +/// positional time warp. This struct is the same as ovrLayerEyeFov, but with the addition +/// of DepthTexture and ProjectionDesc. +/// +/// ProjectionDesc can be created using ovrTimewarpProjectionDesc_FromProjection. 
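+/// For example (a minimal sketch; fov is the ovrFovPort used for rendering, layerDepth is
+/// the ovrLayerEyeFovDepth being filled in, and the near/far values are placeholders for
+/// whatever the application uses in its own projection):
+/// \code{.cpp}
+///   ovrMatrix4f proj = ovrMatrix4f_Projection(fov, 0.2f, 1000.0f, ovrProjection_None);
+///   layerDepth.ProjectionDesc = ovrTimewarpProjectionDesc_FromProjection(proj, ovrProjection_None);
+/// \endcode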
+/// +/// Three options exist with respect to mono/stereo texture usage: +/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, +/// respectively. +/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively. +/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL, +/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0]. +/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and +/// Viewport[1] both refer to that rendering. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFovDepth_ { + /// Header.Type must be ovrLayerType_EyeFovDepth. + ovrLayerHeader Header; + + /// ovrTextureSwapChains for the left and right eye respectively. + /// The second one of which can be NULL for cases described above. + ovrTextureSwapChain ColorTexture[ovrEye_Count]; + + /// Specifies the ColorTexture sub-rect UV coordinates. + /// Both Viewport[0] and Viewport[1] must be valid. + ovrRecti Viewport[ovrEye_Count]; + + /// The viewport field of view. + ovrFovPort Fov[ovrEye_Count]; + + /// Specifies the position and orientation of each eye view, with position specified in meters. + /// RenderPose will typically be the value returned from ovr_CalcEyePoses, + /// but can be different in special cases if a different head pose is used for rendering. + ovrPosef RenderPose[ovrEye_Count]; + + /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose) + /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds + /// around the instant the application calls ovr_GetTrackingState + /// The main purpose for this is to accurately track app tracking latency. + double SensorSampleTime; + + /// Depth texture for positional timewarp. + /// Must map 1:1 to the ColorTexture. + ovrTextureSwapChain DepthTexture[ovrEye_Count]; + + /// Specifies how to convert DepthTexture information into meters. + /// \see ovrTimewarpProjectionDesc_FromProjection + ovrTimewarpProjectionDesc ProjectionDesc; + +} ovrLayerEyeFovDepth; + +/// Describes eye texture layouts. Used with ovrLayerEyeFovMultires. +/// +typedef enum ovrTextureLayout_ { + ovrTextureLayout_Rectilinear = 0, ///< Regular eyeFov layer. + ovrTextureLayout_Octilinear = 1, ///< Octilinear extension must be enabled. + ovrTextureLayout_EnumSize = 0x7fffffff ///< Force type int32_t. +} ovrTextureLayout; + +/// Multiresolution descriptor for Octilinear. +/// +/// Usage of this layer must be successfully enabled via ovr_EnableExtension +/// before it can be used. +/// +/// \see ovrLayerEyeFovMultres +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTextureLayoutOctilinear_ { + // W warping + float WarpLeft; + float WarpRight; + float WarpUp; + float WarpDown; + + // Size of W quadrants. + // + // SizeLeft + SizeRight <= Viewport.Size.w + // SizeUp + sizeDown <= Viewport.Size.h + // + // Clip space (0,0) is located at Viewport.Pos + (SizeLeft,SizeUp) where + // Viewport is given in the layer description. 
+ // + // Viewport Top left + // +-----------------------------------------------------+ + // | ^ | | + // | | | | + // | 0 SizeUp 1 | | + // | | |<--Portion of viewport + // | | | determined by sizes + // | | | | + // |<--------SizeLeft-------+-------SizeRight------>| | + // | | | | + // | | | | + // | 2 SizeDown 3 | | + // | | | | + // | | | | + // | v | | + // +------------------------------------------------+ | + // | | + // +-----------------------------------------------------+ + // Viewport bottom right + // + // For example, when rendering quadrant 0 its scissor rectangle will be + // + // Top = 0 + // Left = 0 + // Right = SizeLeft + // Bottom = SizeUp + // + // and the scissor rectangle for quadrant 1 will be: + // + // Top = 0 + // Left = SizeLeft + // Right = SizeLeft + SizeRight + // Bottom = SizeUp + // + float SizeLeft; + float SizeRight; + float SizeUp; + float SizeDown; + +} ovrTextureLayoutOctilinear; + +/// Combines texture layout descriptors. +/// +typedef union OVR_ALIGNAS(OVR_PTR_SIZE) ovrTextureLayoutDesc_Union_ { + ovrTextureLayoutOctilinear Octilinear[ovrEye_Count]; +} ovrTextureLayoutDesc_Union; + +/// Describes a layer that specifies a monoscopic or stereoscopic view with +/// support for optional multiresolution textures. This struct is the same as +/// ovrLayerEyeFov plus texture layout parameters. +/// +/// Three options exist with respect to mono/stereo texture usage: +/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, +/// respectively. +/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively. +/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL, +/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0]. +/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and +/// Viewport[1] both refer to that rendering. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFovMultires_ { + /// Header.Type must be ovrLayerType_EyeFovMultires. + ovrLayerHeader Header; + + /// ovrTextureSwapChains for the left and right eye respectively. + /// The second one of which can be NULL for cases described above. + ovrTextureSwapChain ColorTexture[ovrEye_Count]; + + /// Specifies the ColorTexture sub-rect UV coordinates. + /// Both Viewport[0] and Viewport[1] must be valid. + ovrRecti Viewport[ovrEye_Count]; + + /// The viewport field of view. + ovrFovPort Fov[ovrEye_Count]; + + /// Specifies the position and orientation of each eye view, with position specified in meters. + /// RenderPose will typically be the value returned from ovr_CalcEyePoses, + /// but can be different in special cases if a different head pose is used for rendering. + ovrPosef RenderPose[ovrEye_Count]; + + /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose) + /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds + /// around the instant the application calls ovr_GetTrackingState + /// The main purpose for this is to accurately track app tracking latency. + double SensorSampleTime; + + /// Specifies layout type of textures. + ovrTextureLayout TextureLayout; + + /// Specifies texture layout parameters. + ovrTextureLayoutDesc_Union TextureLayoutDesc; + +} ovrLayerEyeFovMultires; + +/// Describes a layer that specifies a monoscopic or stereoscopic view. +/// This uses a direct 3x4 matrix to map from view space to the UV coordinates. 
+/// It is essentially the same thing as ovrLayerEyeFov but using a much +/// lower level. This is mainly to provide compatibility with specific apps. +/// Unless the application really requires this flexibility, it is usually better +/// to use ovrLayerEyeFov. +/// +/// Three options exist with respect to mono/stereo texture usage: +/// - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, +/// respectively. +/// Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively. +/// - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL, +/// and Viewport[0] and Viewport[1] refer to sub-rects with ColorTexture[0]. +/// - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and +/// Viewport[1] both refer to that rendering. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeMatrix_ { + /// Header.Type must be ovrLayerType_EyeMatrix. + ovrLayerHeader Header; + + /// ovrTextureSwapChains for the left and right eye respectively. + /// The second one of which can be NULL for cases described above. + ovrTextureSwapChain ColorTexture[ovrEye_Count]; + + /// Specifies the ColorTexture sub-rect UV coordinates. + /// Both Viewport[0] and Viewport[1] must be valid. + ovrRecti Viewport[ovrEye_Count]; + + /// Specifies the position and orientation of each eye view, with position specified in meters. + /// RenderPose will typically be the value returned from ovr_CalcEyePoses, + /// but can be different in special cases if a different head pose is used for rendering. + ovrPosef RenderPose[ovrEye_Count]; + + /// Specifies the mapping from a view-space vector + /// to a UV coordinate on the textures given above. + /// P = (x,y,z,1)*Matrix + /// TexU = P.x/P.z + /// TexV = P.y/P.z + ovrMatrix4f Matrix[ovrEye_Count]; + + /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose) + /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds + /// around the instant the application calls ovr_GetTrackingState + /// The main purpose for this is to accurately track app tracking latency. + double SensorSampleTime; + +} ovrLayerEyeMatrix; + +/// Describes a layer of Quad type, which is a single quad in world or viewer space. +/// It is used for ovrLayerType_Quad. This type of layer represents a single +/// object placed in the world and not a stereo view of the world itself. +/// +/// A typical use of ovrLayerType_Quad is to draw a television screen in a room +/// that for some reason is more convenient to draw as a layer than as part of the main +/// view in layer 0. For example, it could implement a 3D popup GUI that is drawn at a +/// higher resolution than layer 0 to improve fidelity of the GUI. +/// +/// Quad layers are visible from both sides; they are not back-face culled. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerQuad_ { + /// Header.Type must be ovrLayerType_Quad. + ovrLayerHeader Header; + + /// Contains a single image, never with any stereo view. + ovrTextureSwapChain ColorTexture; + + /// Specifies the ColorTexture sub-rect UV coordinates. + ovrRecti Viewport; + + /// Specifies the orientation and position of the center point of a Quad layer type. + /// The supplied direction is the vector perpendicular to the quad. 
+ /// The position is in real-world meters (not the application's virtual world, + /// the physical world the user is in) and is relative to the "zero" position + /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used. + ovrPosef QuadPoseCenter; + + /// Width and height (respectively) of the quad in meters. + ovrVector2f QuadSize; + +} ovrLayerQuad; + +/// Describes a layer of type ovrLayerType_Cylinder which is a single cylinder +/// relative to the recentered origin. This type of layer represents a single +/// object placed in the world and not a stereo view of the world itself. +/// +/// -Z +Y +/// U=0 +--+--+ U=1 +/// +---+ | +---+ +-----------------+ - V=0 +/// +--+ \ | / +--+ | | | +/// +-+ \ / +-+ | | | +/// ++ \ A / ++ | | | +/// ++ \---/ ++ | | | +/// | \ / | | +X | | +/// +-------------C------R------+ +X +--------C--------+ | <--- Height +/// (+Y is out of screen) | | | +/// | | | +/// R = Radius | | | +/// A = Angle (0,2*Pi) | | | +/// C = CylinderPoseCenter | | | +/// U/V = UV Coordinates +-----------------+ - V=1 +/// +/// An identity CylinderPoseCenter places the center of the cylinder +/// at the recentered origin unless the headlocked flag is set. +/// +/// Does not utilize HmdSpaceToWorldScaleInMeters. If necessary, adjust +/// translation and radius. +/// +/// \note Only the interior surface of the cylinder is visible. Use cylinder +/// layers when the user cannot leave the extents of the cylinder. Artifacts may +/// appear when viewing the cylinder's exterior surface. Additionally, while the +/// interface supports an Angle that ranges from [0,2*Pi] the angle should +/// remain less than 1.9*PI to avoid artifacts where the cylinder edges +/// converge. +/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerCylinder_ { + /// Header.Type must be ovrLayerType_Cylinder. + ovrLayerHeader Header; + + /// Contains a single image, never with any stereo view. + ovrTextureSwapChain ColorTexture; + + /// Specifies the ColorTexture sub-rect UV coordinates. + ovrRecti Viewport; + + /// Specifies the orientation and position of the center point of a cylinder layer type. + /// The position is in real-world meters not the application's virtual world, + /// but the physical world the user is in. It is relative to the "zero" position + /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used. + ovrPosef CylinderPoseCenter; + + /// Radius of the cylinder in meters. + float CylinderRadius; + + /// Angle in radians. Range is from 0 to 2*Pi exclusive covering the entire + /// cylinder (see diagram and note above). + float CylinderAngle; + + /// Custom aspect ratio presumably set based on 'Viewport'. Used to + /// calculate the height of the cylinder based on the arc-length (CylinderAngle) + /// and radius (CylinderRadius) given above. The height of the cylinder is + /// given by: height = (CylinderRadius * CylinderAngle) / CylinderAspectRatio. + /// Aspect ratio is width / height. + float CylinderAspectRatio; + +} ovrLayerCylinder; + +/// Describes a layer of type ovrLayerType_Cube which is a single timewarped +/// cubemap at infinity. When looking down the recentered origin's -Z axis, +X +/// face is left and +Y face is up. Similarly, if headlocked the +X face is +/// left, +Y face is up and -Z face is forward. Note that the coordinate system +/// is left-handed. +/// +/// ovrLayerFlag_TextureOriginAtBottomLeft flag is not supported by ovrLayerCube. 
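+///
+/// Example code (illustrative sketch; `session` is assumed to come from ovr_Create and
+/// `cubeChain` from a cubemap texture swap chain created with Type set to e.g. ovrTexture_Cube):
+/// \code{.cpp}
+/// ovrLayerCube cubeLayer = {};
+/// cubeLayer.Header.Type = ovrLayerType_Cube;
+/// cubeLayer.Orientation.w = 1.0f; // identity orientation
+/// cubeLayer.CubeMapTexture = cubeChain;
+/// ovrLayerHeader* layers[1] = {&cubeLayer.Header};
+/// ovrResult result = ovr_SubmitFrame(session, 0, nullptr, layers, 1);
+/// \endcode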
+/// +/// \see ovrTextureSwapChain, ovr_SubmitFrame +/// +typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerCube_ { + /// Header.Type must be ovrLayerType_Cube. + ovrLayerHeader Header; + + /// Orientation of the cube. + ovrQuatf Orientation; + + /// Contains a single cubemap swapchain (not a stereo pair of swapchains). + ovrTextureSwapChain CubeMapTexture; +} ovrLayerCube; + + + +/// Union that combines ovrLayer types in a way that allows them +/// to be used in a polymorphic way. +typedef union ovrLayer_Union_ { + ovrLayerHeader Header; + ovrLayerEyeFov EyeFov; + ovrLayerEyeFovDepth EyeFovDepth; + ovrLayerQuad Quad; + ovrLayerEyeFovMultires Multires; + ovrLayerCylinder Cylinder; + ovrLayerCube Cube; +} ovrLayer_Union; + +//@} + +#if !defined(OVR_EXPORTING_CAPI) + +/// @name SDK Distortion Rendering +/// +/// All of rendering functions including the configure and frame functions +/// are not thread safe. It is OK to use ConfigureRendering on one thread and handle +/// frames on another thread, but explicit synchronization must be done since +/// functions that depend on configured state are not reentrant. +/// +/// These functions support rendering of distortion by the SDK. +/// +//@{ + +/// TextureSwapChain creation is rendering API-specific. +/// ovr_CreateTextureSwapChainDX and ovr_CreateTextureSwapChainGL can be found in the +/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h + +/// Gets the number of buffers in an ovrTextureSwapChain. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies the ovrTextureSwapChain for which the length should be retrieved. +/// \param[out] out_Length Returns the number of buffers in the specified chain. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. +/// +/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* out_Length); + +/// Gets the current index in an ovrTextureSwapChain. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies the ovrTextureSwapChain for which the index should be retrieved. +/// \param[out] out_Index Returns the current (free) index in specified chain. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. +/// +/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainCurrentIndex(ovrSession session, ovrTextureSwapChain chain, int* out_Index); + +/// Gets the description of the buffers in an ovrTextureSwapChain +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies the ovrTextureSwapChain for which the description +/// should be retrieved. +/// \param[out] out_Desc Returns the description of the specified chain. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. +/// +/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainDesc( + ovrSession session, + ovrTextureSwapChain chain, + ovrTextureSwapChainDesc* out_Desc); + +/// Commits any pending changes to an ovrTextureSwapChain, and advances its current index +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. 
+/// \param[in] chain Specifies the ovrTextureSwapChain to commit. +/// +/// \note When Commit is called, the texture at the current index is considered ready for use by the +/// runtime, and further writes to it should be avoided. The swap chain's current index is advanced, +/// providing there's room in the chain. The next time the SDK dereferences this texture swap chain, +/// it will synchronize with the app's graphics context and pick up the submitted index, opening up +/// room in the swap chain for further commits. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. +/// Failures include but aren't limited to: +/// - ovrError_TextureSwapChainFull: ovr_CommitTextureSwapChain was called too many times on a +/// texture swapchain without calling submit to use the chain. +/// +/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain); + +/// Destroys an ovrTextureSwapChain and frees all the resources associated with it. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies the ovrTextureSwapChain to destroy. If it is NULL then +/// this function has no effect. +/// +/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL +/// +OVR_PUBLIC_FUNCTION(void) +ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain); + +/// MirrorTexture creation is rendering API-specific. +/// ovr_CreateMirrorTextureWithOptionsDX and ovr_CreateMirrorTextureWithOptionsGL can be found in +/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h + +/// Destroys a mirror texture previously created by one of the mirror texture creation functions. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] mirrorTexture Specifies the ovrTexture to destroy. If it is NULL then +/// this function has no effect. +/// +/// \see ovr_CreateMirrorTextureWithOptionsDX, ovr_CreateMirrorTextureWithOptionsGL +/// +OVR_PUBLIC_FUNCTION(void) +ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture); + +/// Calculates the recommended viewport size for rendering a given eye within the HMD +/// with a given FOV cone. +/// +/// Higher FOV will generally require larger textures to maintain quality. +/// Apps packing multiple eye views together on the same texture should ensure there are +/// at least 8 pixels of padding between them to prevent texture filtering and chromatic +/// aberration causing images to leak between the two eye views. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] eye Specifies which eye (left or right) to calculate for. +/// \param[in] fov Specifies the ovrFovPort to use. +/// \param[in] pixelsPerDisplayPixel Specifies the ratio of the number of render target pixels +/// to display pixels at the center of distortion. 1.0 is the default value. Lower +/// values can improve performance, higher values give improved quality. +/// +/// Example code +/// \code{.cpp} +/// ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session); +/// ovrSizei eyeSizeLeft = ovr_GetFovTextureSize(session, ovrEye_Left, +/// hmdDesc.DefaultEyeFov[ovrEye_Left], 1.0f); +/// ovrSizei eyeSizeRight = ovr_GetFovTextureSize(session, ovrEye_Right, +/// hmdDesc.DefaultEyeFov[ovrEye_Right], 1.0f); +/// \endcode +/// +/// \return Returns the texture width and height size. 
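+///
+/// As a follow-up (illustrative sketch; packing both eye views into one shared texture is an
+/// application choice, not a requirement, and should leave the padding mentioned in the note
+/// above), the two sizes can be combined into a single render target size:
+/// \code{.cpp}
+/// ovrSizei bufferSize;
+/// bufferSize.w = eyeSizeLeft.w + eyeSizeRight.w;
+/// bufferSize.h = (eyeSizeLeft.h > eyeSizeRight.h) ? eyeSizeLeft.h : eyeSizeRight.h;
+/// // bufferSize can then be used as the Width/Height of an ovrTextureSwapChainDesc.
+/// \endcode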
+/// +OVR_PUBLIC_FUNCTION(ovrSizei) +ovr_GetFovTextureSize( + ovrSession session, + ovrEyeType eye, + ovrFovPort fov, + float pixelsPerDisplayPixel); + +/// Computes the distortion viewport, view adjust, and other rendering parameters for +/// the specified eye. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] eyeType Specifies which eye (left or right) for which to perform calculations. +/// \param[in] fov Specifies the ovrFovPort to use. +/// +/// \return Returns the computed ovrEyeRenderDesc for the given eyeType and field of view. +/// +/// \see ovrEyeRenderDesc +/// +OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc) +ovr_GetRenderDesc(ovrSession session, ovrEyeType eyeType, ovrFovPort fov); + +/// Waits until surfaces are available and it is time to begin rendering the frame. Must be +/// called before ovr_BeginFrame, but not necessarily from the same thread. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \param[in] frameIndex Specifies the targeted application frame index. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: command completed successfully. +/// - ovrSuccess_NotVisible: rendering of a previous frame completed successfully but was not +/// displayed on the HMD, usually because another application currently has ownership of the +/// HMD. Applications receiving this result should stop rendering new content and call +/// ovr_GetSessionStatus to detect visibility. +/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal) +/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session +/// needs to destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be +/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics +/// resources do not need to be recreated unless the new ovr_Create call returns a different +/// GraphicsLuid. +/// +/// \see ovr_BeginFrame, ovr_EndFrame, ovr_GetSessionStatus +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_WaitToBeginFrame(ovrSession session, long long frameIndex); + +/// Called from render thread before application begins rendering. Must be called after +/// ovr_WaitToBeginFrame and before ovr_EndFrame, but not necessarily from the same threads. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \param[in] frameIndex Specifies the targeted application frame index. It must match what was +/// passed to ovr_WaitToBeginFrame. +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: command completed successfully. +/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal) +/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session +/// needs to destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be +/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics +/// resources do not need to be recreated unless the new ovr_Create call returns a different +/// GraphicsLuid. 
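+///
+/// Example code (illustrative sketch of the call order described above; `layers` and
+/// `layerCount` are assumed to be prepared by the application as described for ovr_EndFrame):
+/// \code{.cpp}
+/// ovrResult result = ovr_WaitToBeginFrame(session, frameIndex);
+/// if (OVR_SUCCESS(result))
+///   result = ovr_BeginFrame(session, frameIndex);
+/// // ... render eye views and commit the texture swap chains ...
+/// if (OVR_SUCCESS(result))
+///   result = ovr_EndFrame(session, frameIndex, nullptr, layers, layerCount);
+/// ++frameIndex;
+/// \endcode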
+/// +/// \see ovr_WaitToBeginFrame, ovr_EndFrame +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_BeginFrame(ovrSession session, long long frameIndex); + +/// Called from render thread after application has finished rendering. Must be called after +/// ovr_BeginFrame, but not necessarily from the same thread. Submits layers for distortion and +/// display, which will happen asynchronously. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \param[in] frameIndex Specifies the targeted application frame index. It must match what was +/// passed to ovr_BeginFrame. +/// +/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains +/// an ovrLayerType_Quad. If NULL, a default version is used based on the current +/// configuration and a 1.0 world scale. +/// +/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to +/// indicate that any previously shown layer at that index is to not be displayed. +/// Each layer header must be a part of a layer structure such as ovrLayerEyeFov or +/// ovrLayerQuad, with Header.Type identifying its type. A NULL layerPtrList entry in the +/// array indicates the absence of the given layer. +/// +/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum +/// supported layerCount is not currently specified, but may be specified in a future +/// version. +/// +/// - Layers are drawn in the order they are specified in the array, regardless of the layer type. +/// +/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be +/// specified in every call to ovr_SubmitFrame or it won't be displayed. +/// +/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is +/// passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed. +/// +/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type +/// are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier +/// entry). +/// +/// Example code +/// \code{.cpp} +/// ovrLayerEyeFov layer0; +/// ovrLayerQuad layer1; +/// ... +/// ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header }; +/// ovrResult result = ovr_EndFrame(session, frameIndex, nullptr, layers, 2); +/// \endcode +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: rendering completed successfully. +/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal) +/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session +/// needs to destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be +/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics +/// resources do not need to be recreated unless the new ovr_Create call returns a different +/// GraphicsLuid. +/// - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or +/// inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first. 
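+///
+/// Example code (illustrative sketch of the ovrError_DisplayLost recovery path described above;
+/// `chain` stands in for each of the application's texture swap chains):
+/// \code{.cpp}
+/// if (result == ovrError_DisplayLost) {
+///   ovr_DestroyTextureSwapChain(session, chain);
+///   ovr_Destroy(session);
+///   ovrGraphicsLuid luid;
+///   if (OVR_SUCCESS(ovr_Create(&session, &luid))) {
+///     // recreate swap chains (ovr_CreateTextureSwapChainDX/GL) and resume rendering
+///   }
+/// }
+/// \endcode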
+/// +/// \see ovr_WaitToBeginFrame, ovr_BeginFrame, ovrViewScaleDesc, ovrLayerHeader +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_EndFrame( + ovrSession session, + long long frameIndex, + const ovrViewScaleDesc* viewScaleDesc, + ovrLayerHeader const* const* layerPtrList, + unsigned int layerCount); + +/// Submits layers for distortion and display. +/// +/// Deprecated. Use ovr_WaitToBeginFrame, ovr_BeginFrame, and ovr_EndFrame instead. +/// +/// ovr_SubmitFrame triggers distortion and processing which might happen asynchronously. +/// The function will return when there is room in the submission queue and surfaces +/// are available. Distortion might or might not have completed. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// +/// \param[in] frameIndex Specifies the targeted application frame index, or 0 to refer to one frame +/// after the last time ovr_SubmitFrame was called. +/// +/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains +/// an ovrLayerType_Quad. If NULL, a default version is used based on the current +/// configuration and a 1.0 world scale. +/// +/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to +/// indicate that any previously shown layer at that index is to not be displayed. +/// Each layer header must be a part of a layer structure such as ovrLayerEyeFov or +/// ovrLayerQuad, with Header.Type identifying its type. A NULL layerPtrList entry in the +/// array indicates the absence of the given layer. +/// +/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum +/// supported layerCount is not currently specified, but may be specified in a future +/// version. +/// +/// - Layers are drawn in the order they are specified in the array, regardless of the layer type. +/// +/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be +/// specified in every call to ovr_SubmitFrame or it won't be displayed. +/// +/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is +/// passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed. +/// +/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type +/// are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier +/// entry). +/// +/// Example code +/// \code{.cpp} +/// ovrLayerEyeFov layer0; +/// ovrLayerQuad layer1; +/// ... +/// ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header }; +/// ovrResult result = ovr_SubmitFrame(session, frameIndex, nullptr, layers, 2); +/// \endcode +/// +/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true +/// upon success. Return values include but aren't limited to: +/// - ovrSuccess: rendering completed successfully. +/// - ovrSuccess_NotVisible: rendering completed successfully but was not displayed on the HMD, +/// usually because another application currently has ownership of the HMD. Applications +/// receiving this result should stop rendering new content, call ovr_GetSessionStatus +/// to detect visibility. 
+/// - ovrError_DisplayLost: The session has become invalid (such as due to a device removal) +/// and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session +/// needs to destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be +/// created (ovr_CreateTextureSwapChainXXX). The application's existing private graphics +/// resources do not need to be recreated unless the new ovr_Create call returns a different +/// GraphicsLuid. +/// - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or +/// inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first. +/// +/// \see ovr_GetPredictedDisplayTime, ovrViewScaleDesc, ovrLayerHeader, ovr_GetSessionStatus +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SubmitFrame( + ovrSession session, + long long frameIndex, + const ovrViewScaleDesc* viewScaleDesc, + ovrLayerHeader const* const* layerPtrList, + unsigned int layerCount); +///@} + +#endif // !defined(OVR_EXPORTING_CAPI) + +//------------------------------------------------------------------------------------- +/// @name Frame Timing +/// +//@{ + +/// +/// Contains the performance stats for a given SDK compositor frame +/// +/// All of the 'int' typed fields can be reset via the ovr_ResetPerfStats call. +/// +typedef struct OVR_ALIGNAS(4) ovrPerfStatsPerCompositorFrame_ { + /// Vsync Frame Index - increments with each HMD vertical synchronization signal (i.e. vsync or + /// refresh rate) + /// If the compositor drops a frame, expect this value to increment more than 1 at a time. + int HmdVsyncIndex; + + /// + /// Application stats + /// + + /// Index that increments with each successive ovr_SubmitFrame call + int AppFrameIndex; + + /// If the app fails to call ovr_SubmitFrame on time, then expect this value to increment with + /// each missed frame + int AppDroppedFrameCount; + + /// Motion-to-photon latency for the application + /// This value is calculated by either using the SensorSampleTime provided for the ovrLayerEyeFov + /// or if that + /// is not available, then the call to ovr_GetTrackingState which has latencyMarker set to ovrTrue + float AppMotionToPhotonLatency; + + /// Amount of queue-ahead in seconds provided to the app based on performance and overlap of + /// CPU and GPU utilization. A value of 0.0 would mean the CPU & GPU workload is being completed + /// in 1 frame's worth of time, while 11 ms (on the CV1) of queue ahead would indicate that the + /// app's CPU workload for the next frame is overlapping the GPU workload for the current frame. + float AppQueueAheadTime; + + /// Amount of time in seconds spent on the CPU by the app's render-thread that calls + /// ovr_SubmitFram. Measured as elapsed time between from when app regains control from + /// ovr_SubmitFrame to the next time the app calls ovr_SubmitFrame. + float AppCpuElapsedTime; + + /// Amount of time in seconds spent on the GPU by the app. + /// Measured as elapsed time between each ovr_SubmitFrame call using GPU timing queries. + float AppGpuElapsedTime; + + /// + /// SDK Compositor stats + /// + + /// Index that increments each time the SDK compositor completes a distortion and timewarp pass + /// Since the compositor operates asynchronously, even if the app calls ovr_SubmitFrame too late, + /// the compositor will kick off for each vsync. 
+ int CompositorFrameIndex; + + /// Increments each time the SDK compositor fails to complete in time + /// This is not tied to the app's performance, but failure to complete can be related to other + /// factors such as OS capabilities, overall available hardware cycles to execute the compositor + /// in time and other factors outside of the app's control. + int CompositorDroppedFrameCount; + + /// Motion-to-photon latency of the SDK compositor in seconds. + /// This is the latency of timewarp which corrects the higher app latency as well as dropped app + /// frames. + float CompositorLatency; + + /// The amount of time in seconds spent on the CPU by the SDK compositor. Unless the + /// VR app is utilizing all of the CPU cores at their peak performance, there is a good chance the + /// compositor CPU times will not affect the app's CPU performance in a major way. + float CompositorCpuElapsedTime; + + /// The amount of time in seconds spent on the GPU by the SDK compositor. Any time spent on the + /// compositor will eat away from the available GPU time for the app. + float CompositorGpuElapsedTime; + + /// The amount of time in seconds spent from the point the CPU kicks off the compositor to the + /// point in time the compositor completes the distortion & timewarp on the GPU. In the event the + /// GPU time is not available, expect this value to be -1.0f. + float CompositorCpuStartToGpuEndElapsedTime; + + /// The amount of time in seconds left after the compositor is done on the GPU to the associated + /// V-Sync time. In the event the GPU time is not available, expect this value to be -1.0f. + float CompositorGpuEndToVsyncElapsedTime; + + /// + /// Async Spacewarp stats (ASW) + /// + + /// Will be true if ASW is active for the given frame such that the application is being forced + /// into half the frame-rate while the compositor continues to run at full frame-rate. + ovrBool AswIsActive; + + /// Increments each time ASW it activated where the app was forced in and out of + /// half-rate rendering. + int AswActivatedToggleCount; + + /// Accumulates the number of frames presented by the compositor which had extrapolated + /// ASW frames presented. + int AswPresentedFrameCount; + + /// Accumulates the number of frames that the compositor tried to present when ASW is + /// active but failed. + int AswFailedFrameCount; + +} ovrPerfStatsPerCompositorFrame; + +/// +/// Maximum number of frames of performance stats provided back to the caller of ovr_GetPerfStats +/// +enum { ovrMaxProvidedFrameStats = 5 }; + +/// +/// This is a complete descriptor of the performance stats provided by the SDK +/// +/// \see ovr_GetPerfStats, ovrPerfStatsPerCompositorFrame +typedef struct OVR_ALIGNAS(4) ovrPerfStats_ { + /// FrameStatsCount will have a maximum value set by ovrMaxProvidedFrameStats + /// If the application calls ovr_GetPerfStats at the native refresh rate of the HMD + /// then FrameStatsCount will be 1. If the app's workload happens to force + /// ovr_GetPerfStats to be called at a lower rate, then FrameStatsCount will be 2 or more. + /// If the app does not want to miss any performance data for any frame, it needs to + /// ensure that it is calling ovr_SubmitFrame and ovr_GetPerfStats at a rate that is at least: + /// "HMD_refresh_rate / ovrMaxProvidedFrameStats". On the Oculus Rift CV1 HMD, this will + /// be equal to 18 times per second. + /// + /// The performance entries will be ordered in reverse chronological order such that the + /// first entry will be the most recent one. 
+ ovrPerfStatsPerCompositorFrame FrameStats[ovrMaxProvidedFrameStats]; + int FrameStatsCount; + + /// If the app calls ovr_GetPerfStats at less than 18 fps for CV1, then AnyFrameStatsDropped + /// will be ovrTrue and FrameStatsCount will be equal to ovrMaxProvidedFrameStats. + ovrBool AnyFrameStatsDropped; + + /// AdaptiveGpuPerformanceScale is an edge-filtered value that a caller can use to adjust + /// the graphics quality of the application to keep the GPU utilization in check. The value + /// is calculated as: (desired_GPU_utilization / current_GPU_utilization) + /// As such, when this value is 1.0, the GPU is doing the right amount of work for the app. + /// Lower values mean the app needs to pull back on the GPU utilization. + /// If the app is going to directly drive render-target resolution using this value, then + /// be sure to take the square-root of the value before scaling the resolution with it. + /// Changing render target resolutions however is one of the many things an app can do + /// increase or decrease the amount of GPU utilization. + /// Since AdaptiveGpuPerformanceScale is edge-filtered and does not change rapidly + /// (i.e. reports non-1.0 values once every couple of seconds) the app can make the + /// necessary adjustments and then keep watching the value to see if it has been satisfied. + float AdaptiveGpuPerformanceScale; + + /// Will be true if Async Spacewarp (ASW) is available for this system which is dependent on + /// several factors such as choice of GPU, OS and debug overrides + ovrBool AswIsAvailable; + + /// Contains the Process ID of the VR application the stats are being polled for + /// If an app continues to grab perf stats even when it is not visible, then expect this + /// value to point to the other VR app that has grabbed focus (i.e. became visible) + ovrProcessId VisibleProcessId; +} ovrPerfStats; + +#if !defined(OVR_EXPORTING_CAPI) + +/// Retrieves performance stats for the VR app as well as the SDK compositor. +/// +/// This function will return stats for the VR app that is currently visible in the HMD +/// regardless of what VR app is actually calling this function. +/// +/// If the VR app is trying to make sure the stats returned belong to the same application, +/// the caller can compare the VisibleProcessId with their own process ID. Normally this will +/// be the case if the caller is only calling ovr_GetPerfStats when ovr_GetSessionStatus has +/// IsVisible flag set to be true. +/// +/// If the VR app calling ovr_GetPerfStats is actually the one visible in the HMD, +/// then new perf stats will only be populated after a new call to ovr_SubmitFrame. +/// That means subsequent calls to ovr_GetPerfStats after the first one without calling +/// ovr_SubmitFrame will receive a FrameStatsCount of zero. +/// +/// If the VR app is not visible, or was initially marked as ovrInit_Invisible, then each call +/// to ovr_GetPerfStats will immediately fetch new perf stats from the compositor without +/// a need for the ovr_SubmitFrame call. +/// +/// Even though invisible VR apps do not require ovr_SubmitFrame to be called to gather new +/// perf stats, since stats are generated at the native refresh rate of the HMD (i.e. 90 Hz +/// for CV1), calling it at a higher rate than that would be unnecessary. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. 
+/// \param[out] outStats Contains the performance stats for the application and SDK compositor.
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true
+/// upon success.
+///
+/// \see ovrPerfStats, ovrPerfStatsPerCompositorFrame, ovr_ResetPerfStats
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetPerfStats(ovrSession session, ovrPerfStats* outStats);
+
+/// Resets the accumulated stats reported in each ovrPerfStatsPerCompositorFrame back to zero.
+///
+/// Only the integer values such as HmdVsyncIndex, AppDroppedFrameCount, etc. will be reset,
+/// as the other fields such as AppMotionToPhotonLatency are independent timing values updated
+/// per-frame.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true
+/// upon success.
+///
+/// \see ovrPerfStats, ovrPerfStatsPerCompositorFrame, ovr_GetPerfStats
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetPerfStats(ovrSession session);
+
+/// Gets the time of the specified frame midpoint.
+///
+/// Predicts the time at which the given frame will be displayed. The predicted time
+/// is the middle of the time period during which the corresponding eye images will
+/// be displayed.
+///
+/// The application should increment frameIndex for each successively targeted frame,
+/// and pass that index to any relevant OVR functions that need to apply to the frame
+/// identified by that index.
+///
+/// This function is thread-safe and allows for multiple application threads to target
+/// their processing to the same displayed frame.
+///
+/// In the event that prediction fails for various reasons (e.g. the display being off
+/// or the app has yet to present any frames), the return value will be the current CPU time.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] frameIndex Identifies the frame the caller wishes to target.
+/// A value of zero returns the next frame index.
+/// \return Returns the absolute frame midpoint time for the given frameIndex.
+/// \see ovr_GetTimeInSeconds
+///
+OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex);
+
+/// Returns global, absolute high-resolution time in seconds.
+///
+/// The time frame of reference for this function is not specified and should not be
+/// depended upon.
+///
+/// \return Returns seconds as a floating point value.
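+///
+/// Example code (illustrative sketch of sampling SensorSampleTime for a layer as described for
+/// ovrLayerEyeFov; `frameIndex` is the application's frame counter):
+/// \code{.cpp}
+/// double displayMidpointTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+/// double sensorSampleTime = ovr_GetTimeInSeconds();
+/// ovrTrackingState ts = ovr_GetTrackingState(session, displayMidpointTime, ovrTrue);
+/// // sensorSampleTime is later written to the layer's SensorSampleTime field.
+/// \endcode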
+/// \see ovrPoseStatef, ovrFrameTiming +/// +OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds(); + +#endif // !defined(OVR_EXPORTING_CAPI) + +/// Performance HUD enables the HMD user to see information critical to +/// the real-time operation of the VR application such as latency timing, +/// and CPU & GPU performance metrics +/// +/// App can toggle performance HUD modes as such: +/// \code{.cpp} +/// ovrPerfHudMode PerfHudMode = ovrPerfHud_LatencyTiming; +/// ovr_SetInt(session, OVR_PERF_HUD_MODE, (int)PerfHudMode); +/// \endcode +/// +typedef enum ovrPerfHudMode_ { + ovrPerfHud_Off = 0, ///< Turns off the performance HUD + ovrPerfHud_PerfSummary = 1, ///< Shows performance summary and headroom + ovrPerfHud_LatencyTiming = 2, ///< Shows latency related timing info + ovrPerfHud_AppRenderTiming = 3, ///< Shows render timing info for application + ovrPerfHud_CompRenderTiming = 4, ///< Shows render timing info for OVR compositor + ovrPerfHud_AswStats = 6, ///< Shows Async Spacewarp-specific info + ovrPerfHud_VersionInfo = 5, ///< Shows SDK & HMD version Info + ovrPerfHud_Count = 7, ///< \internal Count of enumerated elements. + ovrPerfHud_EnumSize = 0x7fffffff ///< \internal Force type int32_t. +} ovrPerfHudMode; + +/// Layer HUD enables the HMD user to see information about a layer +/// +/// App can toggle layer HUD modes as such: +/// \code{.cpp} +/// ovrLayerHudMode LayerHudMode = ovrLayerHud_Info; +/// ovr_SetInt(session, OVR_LAYER_HUD_MODE, (int)LayerHudMode); +/// \endcode +/// +typedef enum ovrLayerHudMode_ { + ovrLayerHud_Off = 0, ///< Turns off the layer HUD + ovrLayerHud_Info = 1, ///< Shows info about a specific layer + ovrLayerHud_EnumSize = 0x7fffffff +} ovrLayerHudMode; + +///@} + +/// Debug HUD is provided to help developers gauge and debug the fidelity of their app's +/// stereo rendering characteristics. Using the provided quad and crosshair guides, +/// the developer can verify various aspects such as VR tracking units (e.g. meters), +/// stereo camera-parallax properties (e.g. making sure objects at infinity are rendered +/// with the proper separation), measuring VR geometry sizes and distances and more. +/// +/// App can toggle the debug HUD modes as such: +/// \code{.cpp} +/// ovrDebugHudStereoMode DebugHudMode = ovrDebugHudStereo_QuadWithCrosshair; +/// ovr_SetInt(session, OVR_DEBUG_HUD_STEREO_MODE, (int)DebugHudMode); +/// \endcode +/// +/// The app can modify the visual properties of the stereo guide (i.e. quad, crosshair) +/// using the ovr_SetFloatArray function. For a list of tweakable properties, +/// see the OVR_DEBUG_HUD_STEREO_GUIDE_* keys in the OVR_CAPI_Keys.h header file. +typedef enum ovrDebugHudStereoMode_ { + /// Turns off the Stereo Debug HUD. + ovrDebugHudStereo_Off = 0, + + /// Renders Quad in world for Stereo Debugging. + ovrDebugHudStereo_Quad = 1, + + /// Renders Quad+crosshair in world for Stereo Debugging + ovrDebugHudStereo_QuadWithCrosshair = 2, + + /// Renders screen-space crosshair at infinity for Stereo Debugging + ovrDebugHudStereo_CrosshairAtInfinity = 3, + + /// \internal Count of enumerated elements + ovrDebugHudStereo_Count, + + ovrDebugHudStereo_EnumSize = 0x7fffffff ///< \internal Force type int32_t +} ovrDebugHudStereoMode; + +#if !defined(OVR_EXPORTING_CAPI) + +// ----------------------------------------------------------------------------------- +/// @name Property Access +/// +/// These functions read and write OVR properties. Supported properties +/// are defined in OVR_CAPI_Keys.h +/// +//@{ + +/// Reads a boolean property. 
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a boolean value. Returns defaultVal if
+/// the property doesn't exist.
+OVR_PUBLIC_FUNCTION(ovrBool)
+ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal);
+
+/// Writes or creates a boolean property.
+/// If the property wasn't previously a boolean property, it is changed to a boolean property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the
+/// property name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool)
+ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value);
+
+/// Reads an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as an integer value. Returns defaultVal if
+/// the property doesn't exist.
+OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal);
+
+/// Writes or creates an integer property.
+///
+/// If the property wasn't previously an integer property, it is changed to an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the
+/// property name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value);
+
+/// Reads a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a float value. Returns defaultVal if
+/// the property doesn't exist.
+OVR_PUBLIC_FUNCTION(float)
+ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal);
+
+/// Writes or creates a float property.
+/// If the property wasn't previously a float property, it's changed to a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the
+/// property name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool)
+ovr_SetFloat(ovrSession session, const char* propertyName, float value);
+
+/// Reads a float array property.
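+///
+/// Example code (illustrative; the key name shown is hypothetical and stands in for one of the
+/// properties defined in OVR_CAPI_Keys.h):
+/// \code{.cpp}
+/// float color[4];
+/// unsigned int count = ovr_GetFloatArray(session, "ExampleColorProperty", color, 4);
+/// \endcode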
+/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] propertyName The name of the property, which needs to be valid only for the call. +/// \param[in] values An array of float to write to. +/// \param[in] valuesCapacity Specifies the maximum number of elements to write to the values array. +/// \return Returns the number of elements read, or 0 if property doesn't exist or is empty. +OVR_PUBLIC_FUNCTION(unsigned int) +ovr_GetFloatArray( + ovrSession session, + const char* propertyName, + float values[], + unsigned int valuesCapacity); + +/// Writes or creates a float array property. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] propertyName The name of the property, which needs to be valid only for the call. +/// \param[in] values An array of float to write from. +/// \param[in] valuesSize Specifies the number of elements to write. +/// \return Returns true if successful, otherwise false. A false result should only occur if the +/// property name is empty or if the property is read-only. +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetFloatArray( + ovrSession session, + const char* propertyName, + const float values[], + unsigned int valuesSize); + +/// Reads a string property. +/// Strings are UTF8-encoded and null-terminated. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] propertyName The name of the property, which needs to be valid only for the call. +/// \param[in] defaultVal Specifes the value to return if the property couldn't be read. +/// \return Returns the string property if it exists. Otherwise returns defaultVal, which can be +/// specified as NULL. The return memory is guaranteed to be valid until next call to +/// ovr_GetString or until the session is destroyed, whichever occurs first. +OVR_PUBLIC_FUNCTION(const char*) +ovr_GetString(ovrSession session, const char* propertyName, const char* defaultVal); + +/// Writes or creates a string property. +/// Strings are UTF8-encoded and null-terminated. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] propertyName The name of the property, which needs to be valid only for the call. +/// \param[in] value The string property, which only needs to be valid for the duration of the call. +/// \return Returns true if successful, otherwise false. A false result should only occur if the +/// property name is empty or if the property is read-only. +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetString(ovrSession session, const char* propertyName, const char* value); + +///@} + +#endif // !defined(OVR_EXPORTING_CAPI) + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +/// @cond DoxygenIgnore + + +OVR_STATIC_ASSERT( + sizeof(ovrTextureSwapChainDesc) == 10 * 4, + "ovrTextureSwapChainDesc size mismatch"); + +// ----------------------------------------------------------------------------------- +// ***** Backward compatibility #includes +// +// This is at the bottom of this file because the following is dependent on the +// declarations above. 
+ +#if !defined(OVR_CAPI_NO_UTILS) +#include "Extras/OVR_CAPI_Util.h" +#endif + +/// @endcond + +#endif // OVR_CAPI_h diff --git a/Include/OVR_CAPI_Audio.h b/Include/OVR_CAPI_Audio.h new file mode 100755 index 0000000..5cf1cc9 --- /dev/null +++ b/Include/OVR_CAPI_Audio.h @@ -0,0 +1,85 @@ +/********************************************************************************/ /** + \file OVR_CAPI_Audio.h + \brief CAPI audio functions. + \copyright Copyright 2015 Oculus VR, LLC. All Rights reserved. + ************************************************************************************/ + +#ifndef OVR_CAPI_Audio_h +#define OVR_CAPI_Audio_h + +#ifdef _WIN32 +// Prevents from defining min() and max() macro symbols. +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#include "OVR_CAPI.h" +#define OVR_AUDIO_MAX_DEVICE_STR_SIZE 128 + +#if !defined(OVR_EXPORTING_CAPI) + +/// Gets the ID of the preferred VR audio output device. +/// +/// \param[out] deviceOutId The ID of the user's preferred VR audio device to use, +/// which will be valid upon a successful return value, else it will be WAVE_MAPPER. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(UINT* deviceOutId); + +/// Gets the ID of the preferred VR audio input device. +/// +/// \param[out] deviceInId The ID of the user's preferred VR audio device to use, +/// which will be valid upon a successful return value, else it will be WAVE_MAPPER. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(UINT* deviceInId); + +/// Gets the GUID of the preferred VR audio device as a string. +/// +/// \param[out] deviceOutStrBuffer A buffer where the GUID string for the device will copied to. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetAudioDeviceOutGuidStr(WCHAR deviceOutStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]); + +/// Gets the GUID of the preferred VR audio device. +/// +/// \param[out] deviceOutGuid The GUID of the user's preferred VR audio device to use, +/// which will be valid upon a successful return value, else it will be NULL. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid); + +/// Gets the GUID of the preferred VR microphone device as a string. +/// +/// \param[out] deviceInStrBuffer A buffer where the GUID string for the device will copied to. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetAudioDeviceInGuidStr(WCHAR deviceInStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]); + +/// Gets the GUID of the preferred VR microphone device. +/// +/// \param[out] deviceInGuid The GUID of the user's preferred VR audio device to use, +/// which will be valid upon a successful return value, else it will be NULL. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. 
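+///
+/// Example code (illustrative sketch):
+/// \code{.cpp}
+/// GUID micGuid;
+/// if (OVR_SUCCESS(ovr_GetAudioDeviceInGuid(&micGuid))) {
+///   // pass micGuid to the application's audio capture setup
+/// }
+/// \endcode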
+/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid); + +#endif // !defined(OVR_EXPORTING_CAPI) + +#endif // OVR_OS_MS + +#endif // OVR_CAPI_Audio_h diff --git a/Include/OVR_CAPI_D3D.h b/Include/OVR_CAPI_D3D.h new file mode 100755 index 0000000..3440b6a --- /dev/null +++ b/Include/OVR_CAPI_D3D.h @@ -0,0 +1,203 @@ +/********************************************************************************/ /** + \file OVR_CAPI_D3D.h + \brief D3D specific structures used by the CAPI interface. + \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + ************************************************************************************/ + +#ifndef OVR_CAPI_D3D_h +#define OVR_CAPI_D3D_h + +#include "OVR_CAPI.h" +#include "OVR_Version.h" + + +#if defined(_WIN32) +#include +#include + +#if !defined(OVR_EXPORTING_CAPI) + +//----------------------------------------------------------------------------------- +// ***** Direct3D Specific + +/// Create Texture Swap Chain suitable for use with Direct3D 11 and 12. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with +/// or the D3D12CommandQueue which must be the same one the application renders +/// to the eye textures with. +/// \param[in] desc Specifies requested texture properties. See notes for more info +/// about texture format. +/// \param[in] bindFlags Specifies what ovrTextureBindFlags the application requires +/// for this texture chain. +/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will +/// be valid upon a successful return value, else it will be NULL. +/// This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain +/// before destroying the session with ovr_Destroy. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The texture format provided in \a desc should be thought of as the format the +/// distortion-compositor will use for the ShaderResourceView when reading the contents of +/// the texture. To that end, it is highly recommended that the application requests texture +// swapchain formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) +/// as the compositor does sRGB-correct rendering. As such, the compositor relies on the +/// GPU's hardware sampler to do the sRGB-to-linear conversion. If the application still +/// prefers to render to a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) while handling the +/// linear-to-gamma conversion via HLSL code, then the application must still request the +/// corresponding sRGB format and also use the \a ovrTextureMisc_DX_Typeless flag in the +/// ovrTextureSwapChainDesc's Flag field. This will allow the application to create +/// a RenderTargetView that is the desired linear format while the compositor continues to +/// treat it as sRGB. Failure to do so will cause the compositor to apply unexpected gamma +/// conversions leading to gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless +/// flag for depth buffer formats (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always +/// converted to be typeless. 
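+///
+/// Example code (illustrative sketch; the desc values are application choices, `d3d11Device` is
+/// the application's existing ID3D11Device, and `bufferSize` is assumed to come from
+/// ovr_GetFovTextureSize):
+/// \code{.cpp}
+/// ovrTextureSwapChainDesc desc = {};
+/// desc.Type = ovrTexture_2D;
+/// desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+/// desc.ArraySize = 1;
+/// desc.Width = bufferSize.w;
+/// desc.Height = bufferSize.h;
+/// desc.MipLevels = 1;
+/// desc.SampleCount = 1;
+/// ovrTextureSwapChain chain = nullptr;
+/// ovrResult result = ovr_CreateTextureSwapChainDX(session, d3d11Device, &desc, &chain);
+/// \endcode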
+/// +/// \see ovr_GetTextureSwapChainLength +/// \see ovr_GetTextureSwapChainCurrentIndex +/// \see ovr_GetTextureSwapChainDesc +/// \see ovr_GetTextureSwapChainBufferDX +/// \see ovr_DestroyTextureSwapChain +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* out_TextureSwapChain); + +/// Get a specific buffer within the chain as any compatible COM interface (similar to +/// QueryInterface) +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies an ovrTextureSwapChain previously returned +/// by ovr_CreateTextureSwapChainDX +/// \param[in] index Specifies the index within the chain to retrieve. +/// Must be between 0 and length (see ovr_GetTextureSwapChainLength), +/// or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to +/// GetTextureSwapChainCurrentIndex) +/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for. +/// \param[out] out_Buffer Returns the COM interface pointer retrieved. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// Example code +/// \code{.cpp} +/// ovr_GetTextureSwapChainBufferDX(session, chain, 0, IID_ID3D11Texture2D, &d3d11Texture); +/// ovr_GetTextureSwapChainBufferDX(session, chain, 1, IID_PPV_ARGS(&dxgiResource)); +/// \endcode +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferDX( + ovrSession session, + ovrTextureSwapChain chain, + int index, + IID iid, + void** out_Buffer); + +/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this +/// application. +/// +/// A second call to ovr_CreateMirrorTextureWithOptionsDX for a given ovrSession before destroying +/// the first one is not supported and will result in an error return. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with +/// or the D3D12CommandQueue which must be the same one the application renders to +/// the textures with. +/// \param[in] desc Specifies requested texture properties. +/// See notes for more info about texture format. +/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be valid upon a +/// successful return value, else it will be NULL. +/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before +/// destroying the session with ovr_Destroy. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The texture format provided in \a desc should be thought of as the format the compositor +/// will use for the RenderTargetView when writing into mirror texture. To that end, it is +/// highly recommended that the application requests a mirror texture format that is +/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct +/// rendering. If however the application wants to still read the mirror texture as a linear +/// format (e.g. 
OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion in +/// HLSL code, then it is recommended the application still requests an sRGB format and also +/// use the \a ovrTextureMisc_DX_Typeless flag in the ovrMirrorTextureDesc's Flags field. +/// This will allow the application to bind a ShaderResourceView that is a linear format +/// while the compositor continues to treat is as sRGB. Failure to do so will cause the +/// compositor to apply unexpected gamma conversions leading to gamma-curve artifacts. +/// +/// +/// Example code +/// \code{.cpp} +/// ovrMirrorTexture mirrorTexture = nullptr; +/// ovrMirrorTextureDesc mirrorDesc = {}; +/// mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; +/// mirrorDesc.Width = mirrorWindowWidth; +/// mirrorDesc.Height = mirrorWindowHeight; +/// ovrResult result = ovr_CreateMirrorTextureWithOptionsDX(session, d3d11Device, +/// &mirrorDesc, &mirrorTexture); +/// [...] +/// // Destroy the texture when done with it. +/// ovr_DestroyMirrorTexture(session, mirrorTexture); +/// mirrorTexture = nullptr; +/// \endcode +/// +/// \see ovr_GetMirrorTextureBufferDX +/// \see ovr_DestroyMirrorTexture +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureWithOptionsDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* out_MirrorTexture); + +/// Deprecated. Use ovr_CreateMirrorTextureWithOptionsDX instead +/// +/// Same as ovr_CreateMirrorTextureWithOptionsDX except doesn't use ovrMirrorOptions flags as part +/// of ovrMirrorTextureDesc's MirrorOptions field, and defaults to ovrMirrorOption_PostDistortion +/// +/// \see ovrMirrorOptions, ovr_CreateMirrorTextureWithOptionsDX +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* out_MirrorTexture); + +/// Get a the underlying buffer as any compatible COM interface (similar to QueryInterface) +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned +/// by ovr_CreateMirrorTextureWithOptionsDX +/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for. +/// \param[out] out_Buffer Returns the COM interface pointer retrieved. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// Example code +/// \code{.cpp} +/// ID3D11Texture2D* d3d11Texture = nullptr; +/// ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&d3d11Texture)); +/// d3d11DeviceContext->CopyResource(d3d11TextureBackBuffer, d3d11Texture); +/// d3d11Texture->Release(); +/// dxgiSwapChain->Present(0, 0); +/// \endcode +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetMirrorTextureBufferDX( + ovrSession session, + ovrMirrorTexture mirrorTexture, + IID iid, + void** out_Buffer); + +#endif // !defined(OVR_EXPORTING_CAPI) + +#endif // _WIN32 + +#endif // OVR_CAPI_D3D_h diff --git a/Include/OVR_CAPI_GL.h b/Include/OVR_CAPI_GL.h new file mode 100755 index 0000000..bd894c1 --- /dev/null +++ b/Include/OVR_CAPI_GL.h @@ -0,0 +1,137 @@ +/********************************************************************************/ /** + \file OVR_CAPI_GL.h + \brief OpenGL-specific structures used by the CAPI interface. + \copyright Copyright 2015 Oculus VR, LLC. All Rights reserved. 
+ ************************************************************************************/ + +#ifndef OVR_CAPI_GL_h +#define OVR_CAPI_GL_h + +#include "OVR_CAPI.h" + +#if !defined(OVR_EXPORTING_CAPI) + +/// Creates a TextureSwapChain suitable for use with OpenGL. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] desc Specifies the requested texture properties. +/// See notes for more info about texture format. +/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, +/// which will be valid upon a successful return value, else it will be NULL. +/// This texture swap chain must be eventually destroyed via +// ovr_DestroyTextureSwapChain before destroying the session with ovr_Destroy. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The \a format provided should be thought of as the format the distortion compositor will +/// use when reading the contents of the texture. To that end, it is highly recommended +/// that the application requests texture swap chain formats that are in sRGB-space +/// (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the distortion compositor does sRGB-correct +/// rendering. Furthermore, the app should then make sure "glEnable(GL_FRAMEBUFFER_SRGB);" +/// is called before rendering into these textures. Even though it is not recommended, +/// if the application would like to treat the texture as a linear format and do +/// linear-to-gamma conversion in GLSL, then the application can avoid +/// calling "glEnable(GL_FRAMEBUFFER_SRGB);", but should still pass in an sRGB variant for +/// the \a format. Failure to do so will cause the distortion compositor to apply incorrect +/// gamma conversions leading to gamma-curve artifacts. +/// +/// \see ovr_GetTextureSwapChainLength +/// \see ovr_GetTextureSwapChainCurrentIndex +/// \see ovr_GetTextureSwapChainDesc +/// \see ovr_GetTextureSwapChainBufferGL +/// \see ovr_DestroyTextureSwapChain +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainGL( + ovrSession session, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* out_TextureSwapChain); + +/// Get a specific buffer within the chain as a GL texture name +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies an ovrTextureSwapChain previously returned +/// by ovr_CreateTextureSwapChainGL +/// \param[in] index Specifies the index within the chain to retrieve. +/// Must be between 0 and length (see ovr_GetTextureSwapChainLength) +/// or may pass -1 to get the buffer at the CurrentIndex location. +/// (Saving a call to GetTextureSwapChainCurrentIndex) +/// \param[out] out_TexId Returns the GL texture object name associated with +/// the specific index requested +/// +/// \return Returns an ovrResult indicating success or failure. +/// In the case of failure, use ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferGL( + ovrSession session, + ovrTextureSwapChain chain, + int index, + unsigned int* out_TexId); + +/// Creates a Mirror Texture which is auto-refreshed to mirror Rift contents produced by this +/// application. +/// +/// A second call to ovr_CreateMirrorTextureWithOptionsGL for a given ovrSession before destroying +/// the first one is not supported and will result in an error return. 
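+///
+/// Editorial note (not part of the original header): the GL path ships without an example, so
+/// the following is a minimal usage sketch combining this function with
+/// ovr_GetMirrorTextureBufferGL (declared below). winWidth/winHeight are assumed to describe the
+/// mirror window, and the windowing system's buffer-swap call is assumed to happen elsewhere.
+/// \code{.cpp}
+/// ovrMirrorTexture mirrorTexture = nullptr;
+/// ovrMirrorTextureDesc mirrorDesc = {};
+/// mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+/// mirrorDesc.Width = winWidth;
+/// mirrorDesc.Height = winHeight;
+/// ovrResult result = ovr_CreateMirrorTextureWithOptionsGL(session, &mirrorDesc, &mirrorTexture);
+///
+/// // Wrap the mirror texture in a read framebuffer once after creation.
+/// GLuint mirrorTexId = 0;
+/// ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTexId);
+/// GLuint mirrorFBO = 0;
+/// glGenFramebuffers(1, &mirrorFBO);
+/// glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
+/// glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexId, 0);
+/// glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+///
+/// // Each frame: blit the mirror (flipping Y) into the window's default framebuffer.
+/// glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
+/// glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+/// glBlitFramebuffer(0, winHeight, winWidth, 0, 0, 0, winWidth, winHeight,
+///   GL_COLOR_BUFFER_BIT, GL_NEAREST);
+/// glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+///
+/// // Destroy the mirror texture before destroying the session.
+/// ovr_DestroyMirrorTexture(session, mirrorTexture);
+/// \endcode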
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] desc Specifies the requested mirror texture description.
+/// \param[out] out_MirrorTexture Specifies the created ovrMirrorTexture, which will be
+/// valid upon a successful return value, else it will be NULL.
+/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before
+/// destroying the session with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information.
+///
+/// \note The \a format provided should be thought of as the format the distortion compositor will
+/// use when writing into the mirror texture. It is highly recommended that mirror textures
+/// are requested as sRGB formats because the distortion compositor does sRGB-correct
+/// rendering. If the application requests a non-sRGB format (e.g. R8G8B8A8_UNORM) as the
+/// mirror texture, then the application might have to apply a manual linear-to-gamma
+/// conversion when reading from the mirror texture. Failure to do so can result in
+/// incorrect gamma conversions leading to gamma-curve artifacts and color banding.
+///
+/// \see ovr_GetMirrorTextureBufferGL
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_CreateMirrorTextureWithOptionsGL(
+ ovrSession session,
+ const ovrMirrorTextureDesc* desc,
+ ovrMirrorTexture* out_MirrorTexture);
+
+/// Deprecated. Use ovr_CreateMirrorTextureWithOptionsGL instead
+///
+/// Same as ovr_CreateMirrorTextureWithOptionsGL except doesn't use ovrMirrorOptions flags as part
+/// of ovrMirrorTextureDesc's MirrorOptions field, and defaults to ovrMirrorOption_PostDistortion
+///
+/// \see ovrMirrorOptions, ovr_CreateMirrorTextureWithOptionsGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_CreateMirrorTextureGL(
+ ovrSession session,
+ const ovrMirrorTextureDesc* desc,
+ ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying buffer as a GL texture name
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned
+/// by ovr_CreateMirrorTextureWithOptionsGL
+/// \param[out] out_TexId Specifies the GL texture object name associated with the mirror texture
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_GetMirrorTextureBufferGL(
+ ovrSession session,
+ ovrMirrorTexture mirrorTexture,
+ unsigned int* out_TexId);
+
+#endif // !defined(OVR_EXPORTING_CAPI)
+
+#endif // OVR_CAPI_GL_h
diff --git a/Include/OVR_CAPI_Keys.h b/Include/OVR_CAPI_Keys.h
new file mode 100755
index 0000000..5ff611d
--- /dev/null
+++ b/Include/OVR_CAPI_Keys.h
@@ -0,0 +1,48 @@
+/********************************************************************************/ /**
+ \file OVR_CAPI_Keys.h
+ \brief Keys for CAPI property function calls
+ \copyright Copyright 2015 Oculus VR, LLC All Rights reserved.
+ ************************************************************************************/ + +#ifndef OVR_CAPI_Keys_h +#define OVR_CAPI_Keys_h + +#include "OVR_Version.h" + + + +#define OVR_KEY_USER "User" // string + +#define OVR_KEY_NAME "Name" // string + +#define OVR_KEY_GENDER "Gender" // string "Male", "Female", or "Unknown" +#define OVR_DEFAULT_GENDER "Unknown" + +#define OVR_KEY_PLAYER_HEIGHT "PlayerHeight" // float meters +#define OVR_DEFAULT_PLAYER_HEIGHT 1.778f + +#define OVR_KEY_EYE_HEIGHT "EyeHeight" // float meters +#define OVR_DEFAULT_EYE_HEIGHT 1.675f + +#define OVR_KEY_NECK_TO_EYE_DISTANCE "NeckEyeDistance" // float[2] meters +#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL 0.0805f +#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL 0.075f + +#define OVR_KEY_EYE_TO_NOSE_DISTANCE "EyeToNoseDist" // float[2] meters + + + +#define OVR_PERF_HUD_MODE "PerfHudMode" // int, allowed values are defined in enum ovrPerfHudMode + +#define OVR_LAYER_HUD_MODE "LayerHudMode" // int, allowed values are defined in enum ovrLayerHudMode +#define OVR_LAYER_HUD_CURRENT_LAYER "LayerHudCurrentLayer" // int, The layer to show +#define OVR_LAYER_HUD_SHOW_ALL_LAYERS "LayerHudShowAll" // bool, Hide other layers when hud enabled + +#define OVR_DEBUG_HUD_STEREO_MODE "DebugHudStereoMode" // int, see enum ovrDebugHudStereoMode +#define OVR_DEBUG_HUD_STEREO_GUIDE_INFO_ENABLE "DebugHudStereoGuideInfoEnable" // bool +#define OVR_DEBUG_HUD_STEREO_GUIDE_SIZE "DebugHudStereoGuideSize2f" // float[2] +#define OVR_DEBUG_HUD_STEREO_GUIDE_POSITION "DebugHudStereoGuidePosition3f" // float[3] +#define OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL "DebugHudStereoGuideYawPitchRoll3f" // float[3] +#define OVR_DEBUG_HUD_STEREO_GUIDE_COLOR "DebugHudStereoGuideColor4f" // float[4] + +#endif // OVR_CAPI_Keys_h diff --git a/Include/OVR_CAPI_Vk.h b/Include/OVR_CAPI_Vk.h new file mode 100755 index 0000000..3ac7574 --- /dev/null +++ b/Include/OVR_CAPI_Vk.h @@ -0,0 +1,286 @@ +/********************************************************************************/ /** + \file OVR_CAPI_Vk.h + \brief Vulkan specific structures used by the CAPI interface. + \copyright Copyright 2014-2017 Oculus VR, LLC All Rights reserved. + ************************************************************************************/ + +#ifndef OVR_CAPI_Vk_h +#define OVR_CAPI_Vk_h + +#include "OVR_CAPI.h" +#include "OVR_Version.h" + + +#if !defined(OVR_EXPORTING_CAPI) + +//----------------------------------------------------------------------------------- +// ***** Vulkan Specific + +/// Get a list of Vulkan vkInstance extensions required for VR. +/// +/// Returns a list of strings delimited by a single space identifying Vulkan extensions that must +/// be enabled in order for the VR runtime to support Vulkan-based applications. The returned +/// list reflects the current runtime version and the GPU the VR system is currently connected to. +/// +/// \param[in] luid Specifies the luid for the relevant GPU, which is returned from ovr_Create. +/// \param[in] extensionNames is a character buffer which will receive a list of extension name +/// strings, separated by a single space char between each extension. +/// \param[in] inoutExtensionNamesSize indicates on input the capacity of extensionNames in chars. +/// On output it returns the number of characters written to extensionNames, +/// including the terminating 0 char. In the case of this function returning +/// ovrError_InsufficientArraySize, the required inoutExtensionNamesSize is returned. 
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information. Returns ovrError_InsufficientArraySize if
+/// inoutExtensionNamesSize did not indicate enough space, in which case
+/// inoutExtensionNamesSize will return the required size.
+///
+/// Example code
+/// \code{.cpp}
+/// char extensionNames[4096];
+/// uint32_t extensionNamesSize = sizeof(extensionNames);
+/// ovr_GetInstanceExtensionsVk(luid, extensionNames, &extensionNamesSize);
+///
+/// uint32_t extensionCount = 0;
+/// const char* extensionNamePtrs[256];
+/// for(char* p = extensionNames; *p; ++p) {
+/// if((p == extensionNames) || (p[-1] == ' ')) {
+/// extensionNamePtrs[extensionCount++] = p;
+/// if (p[-1] == ' ')
+/// p[-1] = '\0';
+/// }
+/// }
+///
+/// VkInstanceCreateInfo info = { ... };
+/// info.enabledExtensionCount = extensionCount;
+/// info.ppEnabledExtensionNames = extensionNamePtrs;
+/// [...]
+/// \endcode
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_GetInstanceExtensionsVk(
+ ovrGraphicsLuid luid,
+ char* extensionNames,
+ uint32_t* inoutExtensionNamesSize);
+
+/// Get a list of Vulkan vkDevice extensions required for VR.
+///
+/// Returns a list of strings delimited by a single space identifying Vulkan extensions that must
+/// be enabled in order for the VR runtime to support Vulkan-based applications. The returned
+/// list reflects the current runtime version and the GPU the VR system is currently connected to.
+///
+/// \param[in] luid Specifies the luid for the relevant GPU, which is returned from ovr_Create.
+/// \param[in] extensionNames is a character buffer which will receive a list of extension name
+/// strings, separated by a single space char between each extension.
+/// \param[in] inoutExtensionNamesSize indicates on input the capacity of extensionNames in chars.
+/// On output it returns the number of characters written to extensionNames,
+/// including the terminating 0 char. In the case of this function returning
+/// ovrError_InsufficientArraySize, the required inoutExtensionNamesSize is returned.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information. Returns ovrError_InsufficientArraySize if
+/// inoutExtensionNamesSize did not indicate enough space, in which case
+/// inoutExtensionNamesSize will return the required size.
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_GetDeviceExtensionsVk(
+ ovrGraphicsLuid luid,
+ char* extensionNames,
+ uint32_t* inoutExtensionNamesSize);
+
+/// Find VkPhysicalDevice matching ovrGraphicsLuid
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] luid Specifies the luid returned from ovr_Create.
+/// \param[in] instance Specifies a VkInstance to search for matching luids in.
+/// \param[out] out_physicalDevice Returns the VkPhysicalDevice matching the instance and luid.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information.
+///
+/// \note This function enumerates the current physical devices and returns the one matching the
+/// luid. It must be called at least once prior to any ovr_CreateTextureSwapChainVk or
+/// ovr_CreateMirrorTextureWithOptionsVk calls, and the instance must remain valid for the lifetime
+/// of the returned objects.
It is assumed the VkDevice created by the application will be for the +/// returned physical device. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetSessionPhysicalDeviceVk( + ovrSession session, + ovrGraphicsLuid luid, + VkInstance instance, + VkPhysicalDevice* out_physicalDevice); + +/// Select VkQueue to block on till rendering is complete +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] queue Specifies a VkQueue to add a VkFence operation to and wait on. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The queue may be changed at any time but only the value at the time ovr_SubmitFrame +/// is called will be used. ovr_SetSynchronizationQueueVk must be called with a valid VkQueue +/// created on the same VkDevice the texture sets were created on prior to the first call to +/// ovr_SubmitFrame. An internally created VkFence object will be signalled by the completion +/// of operations on queue and waited on to synchronize the VR compositor. +/// +OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetSynchronizationQueueVk(ovrSession session, VkQueue queue); +// Backwards compatibility for the original typoed version +#define ovr_SetSynchonizationQueueVk ovr_SetSynchronizationQueueVk +// Define OVR_PREVIEW_DEPRECATION to generate warnings for upcoming API deprecations +#if defined(OVR_PREVIEW_DEPRECATION) +#pragma deprecated("ovr_SetSynchonizationQueueVk") +#endif + +/// Create Texture Swap Chain suitable for use with Vulkan +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] device Specifies the application's VkDevice to create resources with. +/// \param[in] desc Specifies requested texture properties. See notes for more info +/// about texture format. +/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid +/// upon a successful return value, else it will be NULL. +/// This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain +/// before destroying the session with ovr_Destroy. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The texture format provided in \a desc should be thought of as the format the +/// distortion-compositor will use for the ShaderResourceView when reading the contents +/// of the texture. To that end, it is highly recommended that the application +/// requests texture swapchain formats that are in sRGB-space +/// (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct rendering. +/// As such, the compositor relies on the GPU's hardware sampler to do the sRGB-to-linear +/// conversion. If the application still prefers to render to a linear format (e.g. +/// OVR_FORMAT_R8G8B8A8_UNORM) while handling the linear-to-gamma conversion via +/// SPIRV code, then the application must still request the corresponding sRGB format and +/// also use the \a ovrTextureMisc_DX_Typeless flag in the ovrTextureSwapChainDesc's +/// Flag field. This will allow the application to create a RenderTargetView that is the +/// desired linear format while the compositor continues to treat it as sRGB. Failure to +/// do so will cause the compositor to apply unexpected gamma conversions leading to +/// gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless flag for depth buffer formats +/// (e.g. 
OVR_FORMAT_D32_FLOAT) is ignored as they are always +/// converted to be typeless. +/// +/// \see ovr_GetTextureSwapChainLength +/// \see ovr_GetTextureSwapChainCurrentIndex +/// \see ovr_GetTextureSwapChainDesc +/// \see ovr_GetTextureSwapChainBufferVk +/// \see ovr_DestroyTextureSwapChain +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainVk( + ovrSession session, + VkDevice device, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* out_TextureSwapChain); + +/// Get a specific VkImage within the chain +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] chain Specifies an ovrTextureSwapChain previously returned by +/// ovr_CreateTextureSwapChainVk +/// \param[in] index Specifies the index within the chain to retrieve. +/// Must be between 0 and length (see ovr_GetTextureSwapChainLength), +/// or may pass -1 to get the buffer at the CurrentIndex location (saving a +/// call to GetTextureSwapChainCurrentIndex). +/// \param[out] out_Image Returns the VkImage retrieved. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferVk( + ovrSession session, + ovrTextureSwapChain chain, + int index, + VkImage* out_Image); + +/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this +/// application. +/// +/// A second call to ovr_CreateMirrorTextureWithOptionsVk for a given ovrSession before destroying +/// the first one is not supported and will result in an error return. +/// +/// \param[in] session Specifies an ovrSession previously returned by ovr_Create. +/// \param[in] device Specifies the VkDevice to create resources with. +/// \param[in] desc Specifies requested texture properties. See notes for more info +/// about texture format. +/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be +/// valid upon a successful return value, else it will be NULL. +/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before +/// destroying the session with ovr_Destroy. +/// +/// \return Returns an ovrResult indicating success or failure. In the case of failure, use +/// ovr_GetLastErrorInfo to get more information. +/// +/// \note The texture format provided in \a desc should be thought of as the format the +/// compositor will use for the VkImageView when writing into mirror texture. To that end, +/// it is highly recommended that the application requests a mirror texture format that is +/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct +/// rendering. If however the application wants to still read the mirror texture as a +/// linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion +/// in SPIRV code, then it is recommended the application still requests an sRGB format and +/// also use the \a ovrTextureMisc_DX_Typeless flag in the ovrMirrorTextureDesc's +/// Flags field. This will allow the application to bind a ShaderResourceView that is a +/// linear format while the compositor continues to treat is as sRGB. Failure to do so will +/// cause the compositor to apply unexpected gamma conversions leading to +/// gamma-curve artifacts. 
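+///
+/// Editorial note (not part of the original header): ovr_CreateTextureSwapChainVk above ships
+/// without an example, so the following is a minimal sketch of the sRGB-plus-typeless setup its
+/// note describes. eyeWidth/eyeHeight are assumed to come from ovr_GetFovTextureSize, and the
+/// DX-named bind/misc flags are reused here on the assumption that the C API defines no
+/// Vulkan-specific equivalents.
+/// \code{.cpp}
+/// ovrTextureSwapChainDesc desc = {};
+/// desc.Type = ovrTexture_2D;
+/// desc.ArraySize = 1;
+/// desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+/// desc.Width = eyeWidth;
+/// desc.Height = eyeHeight;
+/// desc.MipLevels = 1;
+/// desc.SampleCount = 1;
+/// desc.StaticImage = ovrFalse;
+/// desc.MiscFlags = ovrTextureMisc_DX_Typeless; // only if a linear VkImageView is also needed
+/// desc.BindFlags = ovrTextureBind_DX_RenderTarget;
+///
+/// ovrTextureSwapChain chain = nullptr;
+/// ovrResult result = ovr_CreateTextureSwapChainVk(session, vkDevice, &desc, &chain);
+/// if (OVR_SUCCESS(result)) {
+///   int length = 0;
+///   ovr_GetTextureSwapChainLength(session, chain, &length);
+///   for (int i = 0; i < length; ++i) {
+///     VkImage image = VK_NULL_HANDLE;
+///     ovr_GetTextureSwapChainBufferVk(session, chain, i, &image);
+///     // Create the VkImageView / framebuffer for each swap chain image here.
+///   }
+/// }
+/// \endcode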
+///
+/// Example code
+/// \code{.cpp}
+/// ovrMirrorTexture mirrorTexture = nullptr;
+/// ovrMirrorTextureDesc mirrorDesc = {};
+/// mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+/// mirrorDesc.Width = mirrorWindowWidth;
+/// mirrorDesc.Height = mirrorWindowHeight;
+/// ovrResult result = ovr_CreateMirrorTextureWithOptionsVk(session, vkDevice, &mirrorDesc,
+/// &mirrorTexture);
+/// [...]
+/// // Destroy the texture when done with it.
+/// ovr_DestroyMirrorTexture(session, mirrorTexture);
+/// mirrorTexture = nullptr;
+/// \endcode
+///
+/// \see ovr_GetMirrorTextureBufferVk
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_CreateMirrorTextureWithOptionsVk(
+ ovrSession session,
+ VkDevice device,
+ const ovrMirrorTextureDesc* desc,
+ ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying mirror VkImage
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned by
+/// ovr_CreateMirrorTextureWithOptionsVk
+/// \param[out] out_Image Returns the VkImage pointer retrieved.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+/// ovr_GetLastErrorInfo to get more information.
+///
+/// Example code
+/// \code{.cpp}
+/// VkImage mirrorImage = VK_NULL_HANDLE;
+/// ovr_GetMirrorTextureBufferVk(session, mirrorTexture, &mirrorImage);
+/// ...
+/// vkCmdBlitImage(commandBuffer, mirrorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+/// presentImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_LINEAR);
+/// ...
+/// vkQueuePresentKHR(queue, &presentInfo);
+/// \endcode
+///
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_GetMirrorTextureBufferVk(
+ ovrSession session,
+ ovrMirrorTexture mirrorTexture,
+ VkImage* out_Image);
+
+#endif // !defined(OVR_EXPORTING_CAPI)
+
+#endif // OVR_CAPI_Vk_h
diff --git a/Include/OVR_ErrorCode.h b/Include/OVR_ErrorCode.h
new file mode 100755
index 0000000..db0580a
--- /dev/null
+++ b/Include/OVR_ErrorCode.h
@@ -0,0 +1,321 @@
+/********************************************************************************/ /**
+ \file OVR_ErrorCode.h
+ \brief This header provides LibOVR error code declarations.
+ \copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
+ *************************************************************************************/
+
+#ifndef OVR_ErrorCode_h
+#define OVR_ErrorCode_h
+
+#include "OVR_Version.h"
+#include <stdint.h>
+
+
+
+#ifndef OVR_RESULT_DEFINED
+#define OVR_RESULT_DEFINED ///< Allows ovrResult to be independently defined.
+/// API call results are represented at the highest level by a single ovrResult.
+typedef int32_t ovrResult;
+#endif
+
+/// \brief Indicates if an ovrResult indicates success.
+///
+/// Some functions return additional successful values other than ovrSuccess and
+/// require usage of this macro to indicate success.
+///
+#if !defined(OVR_SUCCESS)
+#define OVR_SUCCESS(result) (result >= 0)
+#endif
+
+/// \brief Indicates if an ovrResult indicates an unqualified success.
+///
+/// This is useful for indicating that the code intentionally wants to
+/// check for result == ovrSuccess as opposed to OVR_SUCCESS(), which
+/// checks for result >= ovrSuccess.
+///
+#if !defined(OVR_UNQUALIFIED_SUCCESS)
+#define OVR_UNQUALIFIED_SUCCESS(result) (result == ovrSuccess)
+#endif
+
+/// \brief Indicates if an ovrResult indicates failure.
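+///
+/// Editorial note (not part of the original header): a short sketch of how these macros are
+/// typically combined with ovr_GetLastErrorInfo. The session, frameIndex, layers and layerCount
+/// values are assumed to exist in the surrounding application code.
+/// \code{.cpp}
+/// ovrResult result = ovr_Initialize(nullptr);
+/// if (OVR_FAILURE(result)) {
+///   ovrErrorInfo errorInfo;
+///   ovr_GetLastErrorInfo(&errorInfo);
+///   printf("ovr_Initialize failed: %s\n", errorInfo.ErrorString);
+///   return;
+/// }
+///
+/// // A qualified success such as ovrSuccess_NotVisible still passes OVR_SUCCESS()
+/// // but fails OVR_UNQUALIFIED_SUCCESS(), so rendering work can be throttled accordingly.
+/// result = ovr_SubmitFrame(session, frameIndex, nullptr, layers, layerCount);
+/// if (OVR_SUCCESS(result) && !OVR_UNQUALIFIED_SUCCESS(result)) {
+///   // e.g. ovrSuccess_NotVisible: keep calling ovr_SubmitFrame but skip scene rendering.
+/// }
+/// \endcode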
+/// +#if !defined(OVR_FAILURE) +#define OVR_FAILURE(result) (!OVR_SUCCESS(result)) +#endif + +// Success is a value greater or equal to 0, while all error types are negative values. +#ifndef OVR_SUCCESS_DEFINED +#define OVR_SUCCESS_DEFINED ///< Allows ovrResult to be independently defined. +typedef enum ovrSuccessType_ { + /// This is a general success result. Use OVR_SUCCESS to test for success. + ovrSuccess = 0, +} ovrSuccessType; +#endif + +// Public success types +// Success is a value greater or equal to 0, while all error types are negative values. +typedef enum ovrSuccessTypes_ { + /// Returned from a call to SubmitFrame. The call succeeded, but what the app + /// rendered will not be visible on the HMD. Ideally the app should continue + /// calling SubmitFrame, but not do any rendering. When the result becomes + /// ovrSuccess, rendering should continue as usual. + ovrSuccess_NotVisible = 1000, + + /// Boundary is invalid due to sensor change or was not setup. + ovrSuccess_BoundaryInvalid = 1001, + + /// Device is not available for the requested operation. + ovrSuccess_DeviceUnavailable = 1002, +} ovrSuccessTypes; + +// Public error types +typedef enum ovrErrorType_ { + /******************/ + /* General errors */ + /******************/ + + /// Failure to allocate memory. + ovrError_MemoryAllocationFailure = -1000, + + /// Invalid ovrSession parameter provided. + ovrError_InvalidSession = -1002, + + /// The operation timed out. + ovrError_Timeout = -1003, + + /// The system or component has not been initialized. + ovrError_NotInitialized = -1004, + + /// Invalid parameter provided. See error info or log for details. + ovrError_InvalidParameter = -1005, + + /// Generic service error. See error info or log for details. + ovrError_ServiceError = -1006, + + /// The given HMD doesn't exist. + ovrError_NoHmd = -1007, + + /// Function call is not supported on this hardware/software + ovrError_Unsupported = -1009, + + /// Specified device type isn't available. + ovrError_DeviceUnavailable = -1010, + + /// The headset was in an invalid orientation for the requested + /// operation (e.g. vertically oriented during ovr_RecenterPose). + ovrError_InvalidHeadsetOrientation = -1011, + + /// The client failed to call ovr_Destroy on an active session before calling ovr_Shutdown. + /// Or the client crashed. + ovrError_ClientSkippedDestroy = -1012, + + /// The client failed to call ovr_Shutdown or the client crashed. + ovrError_ClientSkippedShutdown = -1013, + + ///< The service watchdog discovered a deadlock. + ovrError_ServiceDeadlockDetected = -1014, + + ///< Function call is invalid for object's current state + ovrError_InvalidOperation = -1015, + + ///< Increase size of output array + ovrError_InsufficientArraySize = -1016, + + /// There is not any external camera information stored by ovrServer. + ovrError_NoExternalCameraInfo = -1017, + + /// Tracking is lost when ovr_GetDevicePoses() is called. + ovrError_LostTracking = -1018, + + /// There was a problem initializing the external camera for capture + ovrError_ExternalCameraInitializedFailed = -1019, + + /// There was a problem capturing external camera frames + ovrError_ExternalCameraCaptureFailed = -1020, + + /// The external camera friendly name list and the external camera name list + /// are not the fixed size(OVR_MAX_EXTERNAL_CAMERA_NAME_BUFFER_SIZE). + ovrError_ExternalCameraNameListsBufferSize = -1021, + + /// The external camera friendly name list is not the same size as + /// the external camera name list. 
+ ovrError_ExternalCameraNameListsMistmatch = -1022, + + /// The external camera property has not been sent to OVRServer + /// when the user tries to open the camera. + ovrError_ExternalCameraNotCalibrated = -1023, + + /// The external camera name is larger than OVR_EXTERNAL_CAMERA_NAME_SIZE-1 + ovrError_ExternalCameraNameWrongSize = -1024, + + /*************************************************/ + /* Audio error range, reserved for Audio errors. */ + /*************************************************/ + + /// Failure to find the specified audio device. + ovrError_AudioDeviceNotFound = -2001, + + /// Generic COM error. + ovrError_AudioComError = -2002, + + /**************************/ + /* Initialization errors. */ + /**************************/ + + /// Generic initialization error. + ovrError_Initialize = -3000, + + /// Couldn't load LibOVRRT. + ovrError_LibLoad = -3001, + + /// LibOVRRT version incompatibility. + ovrError_LibVersion = -3002, + + /// Couldn't connect to the OVR Service. + ovrError_ServiceConnection = -3003, + + /// OVR Service version incompatibility. + ovrError_ServiceVersion = -3004, + + /// The operating system version is incompatible. + ovrError_IncompatibleOS = -3005, + + /// Unable to initialize the HMD display. + ovrError_DisplayInit = -3006, + + /// Unable to start the server. Is it already running? + ovrError_ServerStart = -3007, + + /// Attempting to re-initialize with a different version. + ovrError_Reinitialization = -3008, + + /// Chosen rendering adapters between client and service do not match + ovrError_MismatchedAdapters = -3009, + + /// Calling application has leaked resources + ovrError_LeakingResources = -3010, + + /// Client version too old to connect to service + ovrError_ClientVersion = -3011, + + /// The operating system is out of date. + ovrError_OutOfDateOS = -3012, + + /// The graphics driver is out of date. + ovrError_OutOfDateGfxDriver = -3013, + + /// The graphics hardware is not supported + ovrError_IncompatibleGPU = -3014, + + /// No valid VR display system found. + ovrError_NoValidVRDisplaySystem = -3015, + + /// Feature or API is obsolete and no longer supported. + ovrError_Obsolete = -3016, + + /// No supported VR display system found, but disabled or driverless adapter found. + ovrError_DisabledOrDefaultAdapter = -3017, + + /// The system is using hybrid graphics (Optimus, etc...), which is not support. + ovrError_HybridGraphicsNotSupported = -3018, + + /// Initialization of the DisplayManager failed. + ovrError_DisplayManagerInit = -3019, + + /// Failed to get the interface for an attached tracker + ovrError_TrackerDriverInit = -3020, + + /// LibOVRRT signature check failure. + ovrError_LibSignCheck = -3021, + + /// LibOVRRT path failure. + ovrError_LibPath = -3022, + + /// LibOVRRT symbol resolution failure. + ovrError_LibSymbols = -3023, + + /// Failed to connect to the service because remote connections to the service are not allowed. + ovrError_RemoteSession = -3024, + + /// Vulkan initialization error. + ovrError_InitializeVulkan = -3025, + + /// The graphics driver is black-listed. + ovrError_BlacklistedGfxDriver = -3026, + + /********************/ + /* Rendering errors */ + /********************/ + + /// In the event of a system-wide graphics reset or cable unplug this is returned to the app. + ovrError_DisplayLost = -6000, + + /// ovr_CommitTextureSwapChain was called too many times on a texture swapchain without + /// calling submit to use the chain. 
+ ovrError_TextureSwapChainFull = -6001, + + /// The ovrTextureSwapChain is in an incomplete or inconsistent state. + /// Ensure ovr_CommitTextureSwapChain was called at least once first. + ovrError_TextureSwapChainInvalid = -6002, + + /// Graphics device has been reset (TDR, etc...) + ovrError_GraphicsDeviceReset = -6003, + + /// HMD removed from the display adapter + ovrError_DisplayRemoved = -6004, + + /// Content protection is not available for the display. + ovrError_ContentProtectionNotAvailable = -6005, + + /// Application declared itself as an invisible type and is not allowed to submit frames. + ovrError_ApplicationInvisible = -6006, + + /// The given request is disallowed under the current conditions. + ovrError_Disallowed = -6007, + + /// Display portion of HMD is plugged into an incompatible port (ex: IGP) + ovrError_DisplayPluggedIncorrectly = -6008, + + /// Returned in the event a virtual display system reaches a display limit + ovrError_DisplayLimitReached = -6009, + + /****************/ + /* Fatal errors */ + /****************/ + + ///< A runtime exception occurred. The application is required to shutdown LibOVR and + /// re-initialize it before this error state will be cleared. + ovrError_RuntimeException = -7000, + + /**********************/ + /* Calibration errors */ + /**********************/ + + /// Result of a missing calibration block + ovrError_NoCalibration = -9000, + + /// Result of an old calibration block + ovrError_OldVersion = -9001, + + /// Result of a bad calibration block due to lengths + ovrError_MisformattedBlock = -9002, + +/****************/ +/* Other errors */ +/****************/ + + +} ovrErrorType; + +/// Provides information about the last error. +/// \see ovr_GetLastErrorInfo +typedef struct ovrErrorInfo_ { + /// The result from the last API call that generated an error ovrResult. + ovrResult Result; + + /// A UTF8-encoded null-terminated English string describing the problem. + /// The format of this string is subject to change in future versions. + char ErrorString[512]; +} ovrErrorInfo; + +#endif /* OVR_ErrorCode_h */ diff --git a/Include/OVR_Version.h b/Include/OVR_Version.h new file mode 100755 index 0000000..01bfdcb --- /dev/null +++ b/Include/OVR_Version.h @@ -0,0 +1,60 @@ +/************************************************************************************* + \file OVR_Version.h + \brief This header provides LibOVR version identification. + \copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + *************************************************************************************/ + +#ifndef OVR_Version_h +#define OVR_Version_h + + +/// Conventional string-ification macro. +#if !defined(OVR_STRINGIZE) +#define OVR_STRINGIZEIMPL(x) #x +#define OVR_STRINGIZE(x) OVR_STRINGIZEIMPL(x) +#endif + +// Master version numbers +#define OVR_PRODUCT_VERSION 1 // Product version doesn't participate in semantic versioning. +#define OVR_MAJOR_VERSION 1 // If you change these values then you need to also make sure to change +// LibOVR/Projects/Windows/LibOVR.props in parallel. +#define OVR_MINOR_VERSION 24 // +#define OVR_PATCH_VERSION 0 +#define OVR_BUILD_NUMBER 0 + +// This is the ((product * 100) + major) version of the service that the DLL is compatible with. +// When we backport changes to old versions of the DLL we update the old DLLs +// to move this version number up to the latest version. 
+// The DLL is responsible for checking that the service is the version it supports +// and returning an appropriate error message if it has not been made compatible. +#define OVR_DLL_COMPATIBLE_VERSION 101 + +// This is the minor version representing the minimum version an application can query with this +// SDK. Calls ovr_Initialize will fail if the application requests a version that is less than this. +#define OVR_MIN_REQUESTABLE_MINOR_VERSION 17 + +#define OVR_FEATURE_VERSION 0 + +/// "Major.Minor.Patch" +#if !defined(OVR_VERSION_STRING) +#define OVR_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION) +#endif + +/// "Major.Minor.Patch.Build" +#if !defined(OVR_DETAILED_VERSION_STRING) +#define OVR_DETAILED_VERSION_STRING \ + OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION.OVR_BUILD_NUMBER) +#endif + +/// \brief file description for version info +/// This appears in the user-visible file properties. It is intended to convey publicly +/// available additional information such as feature builds. +#if !defined(OVR_FILE_DESCRIPTION_STRING) +#if defined(_DEBUG) +#define OVR_FILE_DESCRIPTION_STRING "dev build debug" +#else +#define OVR_FILE_DESCRIPTION_STRING "dev build" +#endif +#endif + +#endif // OVR_Version_h diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..350534c --- /dev/null +++ b/Makefile @@ -0,0 +1,41 @@ + +CXX = i686-w64-mingw32-g++ +LINK = i686-w64-mingw32-g++ --shared +CXXFLAGS = -pipe -fPIC -O2 -fpermissive -DUNICODE=1 -D_UNICODE=1 +//-DNTSTATUS=DWORD + +LIBOVRPATH = . +INCPATH = -I. -IInclude -ISrc -I3rdparty +SRCPATH = Shim +OBJPATH = Shim +CXXBUILD = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $(OBJPATH)/ + +TARGET = libovr.dll + +all: $(TARGET) + +OBJECTS = \ + $(OBJPATH)/OVR_CAPIShim.o \ + $(OBJPATH)/OVR_CAPI_Util.o \ + $(OBJPATH)/OVR_StereoProjection.o + +$(TARGET): $(OBJECTS) + $(LINK) -o $(TARGET) $(OBJECTS) $(LINKFLAGS) + +$(OBJPATH)/OVR_CAPIShim.o: $(SRCPATH)/OVR_CAPIShim.c + $(CXXBUILD)OVR_CAPIShim.o $(SRCPATH)/OVR_CAPIShim.c + +$(OBJPATH)/OVR_CAPI_Util.o: $(SRCPATH)/OVR_CAPI_Util.cpp + $(CXXBUILD)OVR_CAPI_Util.o $(SRCPATH)/OVR_CAPI_Util.cpp + +$(OBJPATH)/OVR_StereoProjection.o: $(SRCPATH)/OVR_StereoProjection.cpp + $(CXXBUILD)OVR_StereoProjection.o $(SRCPATH)/OVR_StereoProjection.cpp + +install: $(TARGET) + cp $(TARGET) /usr/i686-w64-mingw32/lib/ + cp -a Include/* /usr/i686-w64-mingw32/include/ + +clean: + rm -f $(OBJPATH)/*.o + rm -f $(TARGET) + diff --git a/Shim/OVR_CAPIShim.c b/Shim/OVR_CAPIShim.c new file mode 100755 index 0000000..49e0880 --- /dev/null +++ b/Shim/OVR_CAPIShim.c @@ -0,0 +1,2104 @@ +/************************************************************************************ + +Filename : OVR_CAPIShim.c +Content : CAPI DLL user library +Created : November 20, 2014 +Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.3 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "OVR_CAPI.h" +#include "OVR_Version.h" +#include "OVR_ErrorCode.h" +#include "OVR_CAPI_Prototypes.h" +#include +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) +#if defined(_MSC_VER) +#pragma warning(push, 0) +#endif +#include +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#include "../Include/OVR_CAPI_D3D.h" +#else +#if defined(__APPLE__) +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#endif +#include "../Include/OVR_CAPI_GL.h" +#include "../Include/OVR_CAPI_Vk.h" + + +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4996) // 'getenv': This function or variable may be unsafe. +#endif + +// clang-format off +static const uint8_t OculusSDKUniqueIdentifier[] = { + 0x9E, 0xB2, 0x0B, 0x1A, 0xB7, 0x97, 0x09, 0x20, 0xE0, 0xFB, 0x83, 0xED, 0xF8, 0x33, 0x5A, 0xEB, + 0x80, 0x4D, 0x8E, 0x92, 0x20, 0x69, 0x13, 0x56, 0xB4, 0xBB, 0xC4, 0x85, 0xA7, 0x9E, 0xA4, 0xFE, + OVR_MAJOR_VERSION, OVR_MINOR_VERSION, OVR_PATCH_VERSION +}; + +// clang-format on + +static const uint8_t OculusSDKUniqueIdentifierXORResult = 0xcb; + +// ----------------------------------------------------------------------------------- +// ***** OVR_ENABLE_DEVELOPER_SEARCH +// +// If defined then our shared library loading code searches for developer build +// directories. +// +#if !defined(OVR_ENABLE_DEVELOPER_SEARCH) +#endif + +// ----------------------------------------------------------------------------------- +// ***** OVR_BUILD_DEBUG +// +// Defines OVR_BUILD_DEBUG when the compiler default debug preprocessor is set. +// +// If you want to control the behavior of these flags, then explicitly define +// either -DOVR_BUILD_RELEASE or -DOVR_BUILD_DEBUG in the compiler arguments. + +#if !defined(OVR_BUILD_DEBUG) && !defined(OVR_BUILD_RELEASE) +#if defined(_MSC_VER) +#if defined(_DEBUG) +#define OVR_BUILD_DEBUG +#endif +#else +#if defined(DEBUG) +#define OVR_BUILD_DEBUG +#endif +#endif +#endif + +//----------------------------------------------------------------------------------- +// ***** FilePathCharType, ModuleHandleType, ModuleFunctionType +// +#if defined(_WIN32) // We need to use wchar_t on Microsoft platforms, as that's the native file +// system character type. +#define FilePathCharType \ + wchar_t // #define instead of typedef because debuggers (VC++, XCode) don't recognize typedef'd +// types as a string type. 
+typedef HMODULE ModuleHandleType; +typedef FARPROC ModuleFunctionType; +#else +#define FilePathCharType char +typedef void* ModuleHandleType; +typedef void* ModuleFunctionType; +#endif + +#define ModuleHandleTypeNull ((ModuleHandleType)NULL) +#define ModuleFunctionTypeNull ((ModuleFunctionType)NULL) + +//----------------------------------------------------------------------------------- +// ***** OVR_MAX_PATH +// +#if !defined(OVR_MAX_PATH) +#if defined(_WIN32) +#define OVR_MAX_PATH _MAX_PATH +#elif defined(__APPLE__) +#define OVR_MAX_PATH PATH_MAX +#else +#define OVR_MAX_PATH 1024 +#endif +#endif + +#if !defined(OVR_DLSYM) +#if defined(_WIN32) +#define OVR_DLSYM(dlImage, name) GetProcAddress(dlImage, name) +#else +#define OVR_DLSYM(dlImage, name) dlsym(dlImage, name) +#endif +#endif + +static size_t OVR_strlcpy(char* dest, const char* src, size_t destsize) { + const char* s = src; + size_t n = destsize; + + if (n && --n) { + do { + if ((*dest++ = *s++) == 0) + break; + } while (--n); + } + + if (!n) { + if (destsize) + *dest = 0; + while (*s++) { + } + } + + return (size_t)((s - src) - 1); +} + +static size_t OVR_strlcat(char* dest, const char* src, size_t destsize) { + const size_t d = destsize ? strlen(dest) : 0; + const size_t s = strlen(src); + const size_t t = s + d; + + if (t < destsize) + memcpy(dest + d, src, (s + 1) * sizeof(*src)); + else { + if (destsize) { + memcpy(dest + d, src, ((destsize - d) - 1) * sizeof(*src)); + dest[destsize - 1] = 0; + } + } + + return t; +} + +#if defined(__APPLE__) +static ovrBool +OVR_strend(const char* pStr, const char* pFind, size_t strLength, size_t findLength) { + if (strLength == (size_t)-1) + strLength = strlen(pStr); + if (findLength == (size_t)-1) + findLength = strlen(pFind); + if (strLength >= findLength) + return (strcmp(pStr + strLength - findLength, pFind) == 0); + return ovrFalse; +} + +static ovrBool OVR_isBundleFolder(const char* filePath) { + static const char* extensionArray[] = {".app", ".bundle", ".framework", ".plugin", ".kext"}; + size_t i; + + for (i = 0; i < sizeof(extensionArray) / sizeof(extensionArray[0]); i++) { + if (OVR_strend(filePath, extensionArray[i], (size_t)-1, (size_t)-1)) + return ovrTrue; + } + + return ovrFalse; +} +#endif + +#if defined(OVR_ENABLE_DEVELOPER_SEARCH) + +// Returns true if the path begins with the given prefix. +// Doesn't support non-ASCII paths, else the return value may be incorrect. +static int OVR_PathStartsWith(const FilePathCharType* path, const char* prefix) { + while (*prefix) { + if (tolower((unsigned char)*path++) != tolower((unsigned char)*prefix++)) + return ovrFalse; + } + + return ovrTrue; +} + +#endif + +static ovrBool OVR_GetCurrentWorkingDirectory( + FilePathCharType* directoryPath, + size_t directoryPathCapacity) { +#if defined(_WIN32) + DWORD dwSize = GetCurrentDirectoryW((DWORD)directoryPathCapacity, directoryPath); + + if ((dwSize > 0) && + (directoryPathCapacity > 1)) // Test > 1 so we have room to possibly append a \ char. + { + size_t length = wcslen(directoryPath); + + if ((length == 0) || + ((directoryPath[length - 1] != L'\\') && (directoryPath[length - 1] != L'/'))) { + directoryPath[length++] = L'\\'; + directoryPath[length] = L'\0'; + } + + return ovrTrue; + } + +#else + char* cwd = getcwd(directoryPath, directoryPathCapacity); + + if (cwd && directoryPath[0] && + (directoryPathCapacity > 1)) // Test > 1 so we have room to possibly append a / char. 
+ { + size_t length = strlen(directoryPath); + + if ((length == 0) || (directoryPath[length - 1] != '/')) { + directoryPath[length++] = '/'; + directoryPath[length] = '\0'; + } + + return ovrTrue; + } +#endif + + if (directoryPathCapacity > 0) + directoryPath[0] = '\0'; + + return ovrFalse; +} + +// The appContainer argument is specific currently to only Macintosh. If true and the application is +// a .app bundle then it returns the +// location of the bundle and not the path to the executable within the bundle. Else return the path +// to the executable binary itself. +// The moduleHandle refers to the relevant dynamic (a.k.a. shared) library. The main executable is +// the main module, and each of the shared +// libraries is a module. This way you can specify that you want to know the directory of the given +// shared library, which may be different +// from the main executable. If the moduleHandle is NULL then the current application module is +// used. +static ovrBool OVR_GetCurrentApplicationDirectory( + FilePathCharType* directoryPath, + size_t directoryPathCapacity, + ovrBool appContainer, + ModuleHandleType moduleHandle) { +#if defined(_WIN32) + DWORD length = GetModuleFileNameW(moduleHandle, directoryPath, (DWORD)directoryPathCapacity); + DWORD pos; + + if ((length != 0) && + (length < + (DWORD)directoryPathCapacity)) // If there wasn't an error and there was enough capacity... + { + for (pos = length; (pos > 0) && (directoryPath[pos] != '\\') && (directoryPath[pos] != '/'); + --pos) { + if ((directoryPath[pos - 1] != '\\') && (directoryPath[pos - 1] != '/')) + directoryPath[pos - 1] = 0; + } + + return ovrTrue; + } + + (void)appContainer; // Not used on this platform. + +#elif defined(__APPLE__) + uint32_t directoryPathCapacity32 = (uint32_t)directoryPathCapacity; + int result = _NSGetExecutablePath(directoryPath, &directoryPathCapacity32); + + if (result == 0) // If success... + { + char realPath[OVR_MAX_PATH]; + + if (realpath(directoryPath, realPath)) // realpath returns the canonicalized absolute file path. + { + size_t length = 0; + + if (appContainer) // If the caller wants the path to the containing bundle... + { + char containerPath[OVR_MAX_PATH]; + ovrBool pathIsContainer; + + OVR_strlcpy(containerPath, realPath, sizeof(containerPath)); + pathIsContainer = OVR_isBundleFolder(containerPath); + + while (!pathIsContainer && strncmp(containerPath, ".", OVR_MAX_PATH) && + strncmp(containerPath, "/", OVR_MAX_PATH)) // While the container we're looking for + // is not found and while the path doesn't + // start with a . or / + { + OVR_strlcpy(containerPath, dirname(containerPath), sizeof(containerPath)); + pathIsContainer = OVR_isBundleFolder(containerPath); + } + + if (pathIsContainer) + length = OVR_strlcpy(directoryPath, containerPath, directoryPathCapacity); + } + + if (length == 0) // If not set above in the appContainer block... + length = OVR_strlcpy(directoryPath, realPath, directoryPathCapacity); + + while (length-- && (directoryPath[length] != '/')) + directoryPath[length] = + '\0'; // Strip the file name from the file path, leaving a trailing / char. + + return ovrTrue; + } + } + + (void)moduleHandle; // Not used on this platform. 
+ +#else + ssize_t length = readlink("/proc/self/exe", directoryPath, directoryPathCapacity); + ssize_t pos; + + if (length > 0) { + for (pos = length; (pos > 0) && (directoryPath[pos] != '/'); --pos) { + if (directoryPath[pos - 1] != '/') + directoryPath[pos - 1] = '\0'; + } + + return ovrTrue; + } + + (void)appContainer; // Not used on this platform. + (void)moduleHandle; +#endif + + if (directoryPathCapacity > 0) + directoryPath[0] = '\0'; + + return ovrFalse; +} + +#if defined(_WIN32) || defined(OVR_ENABLE_DEVELOPER_SEARCH) // Used only in these cases + +// Get the file path to the current module's (DLL or EXE) directory within the current process. +// Will be different from the process module handle if the current module is a DLL and is in a +// different directory than the EXE module. +// If successful then directoryPath will be valid and ovrTrue is returned, else directoryPath will +// be empty and ovrFalse is returned. +static ovrBool OVR_GetCurrentModuleDirectory( + FilePathCharType* directoryPath, + size_t directoryPathCapacity, + ovrBool appContainer) { +#if defined(_WIN32) + HMODULE hModule; + BOOL result = GetModuleHandleExW( + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (LPCWSTR)(uintptr_t)OVR_GetCurrentModuleDirectory, + &hModule); + if (result) + OVR_GetCurrentApplicationDirectory(directoryPath, directoryPathCapacity, ovrTrue, hModule); + else + directoryPath[0] = 0; + + (void)appContainer; + + return directoryPath[0] ? ovrTrue : ovrFalse; +#else + return OVR_GetCurrentApplicationDirectory( + directoryPath, directoryPathCapacity, appContainer, NULL); +#endif +} + +#endif + +#if defined(_WIN32) + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4201) +#endif + +#include +#include + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +// Expected certificates: +#define ExpectedNumCertificates 3 +typedef struct CertificateEntry_t { + const wchar_t* Issuer; + const wchar_t* Subject; +} CertificateEntry; + +CertificateEntry NewCertificateChain[ExpectedNumCertificates] = { + {L"DigiCert SHA2 Assured ID Code Signing CA", L"Oculus VR, LLC"}, + {L"DigiCert Assured ID Root CA", L"DigiCert SHA2 Assured ID Code Signing CA"}, + {L"DigiCert Assured ID Root CA", L"DigiCert Assured ID Root CA"}, +}; + +#define CertificateChainCount 1 +CertificateEntry* AllowedCertificateChains[CertificateChainCount] = {NewCertificateChain}; + +typedef WINCRYPT32API DWORD(WINAPI* PtrCertGetNameStringW)( + PCCERT_CONTEXT pCertContext, + DWORD dwType, + DWORD dwFlags, + void* pvTypePara, + LPWSTR pszNameString, + DWORD cchNameString); +typedef LONG(WINAPI* PtrWinVerifyTrust)(HWND hwnd, GUID* pgActionID, LPVOID pWVTData); +typedef CRYPT_PROVIDER_DATA*(WINAPI* PtrWTHelperProvDataFromStateData)(HANDLE hStateData); +typedef CRYPT_PROVIDER_SGNR*(WINAPI* PtrWTHelperGetProvSignerFromChain)( + CRYPT_PROVIDER_DATA* pProvData, + DWORD idxSigner, + BOOL fCounterSigner, + DWORD idxCounterSigner); + +PtrCertGetNameStringW m_PtrCertGetNameStringW = 0; +PtrWinVerifyTrust m_PtrWinVerifyTrust = 0; +PtrWTHelperProvDataFromStateData m_PtrWTHelperProvDataFromStateData = 0; +PtrWTHelperGetProvSignerFromChain m_PtrWTHelperGetProvSignerFromChain = 0; + +typedef enum ValidateCertificateContentsResult_ { + VCCRSuccess = 0, + VCCRErrorCertCount = -1, + VCCRErrorTrust = -2, + VCCRErrorValidation = -3 +} ValidateCertificateContentsResult; + +static ValidateCertificateContentsResult ValidateCertificateContents( + CertificateEntry* chain, + CRYPT_PROVIDER_SGNR* cps) { + int 
certIndex; + + if (!cps || !cps->pasCertChain || cps->csCertChain != ExpectedNumCertificates) { + return VCCRErrorCertCount; + } + + for (certIndex = 0; certIndex < ExpectedNumCertificates; ++certIndex) { + CRYPT_PROVIDER_CERT* pCertData = &cps->pasCertChain[certIndex]; + wchar_t subjectStr[400] = {0}; + wchar_t issuerStr[400] = {0}; + + if ((pCertData->fSelfSigned && !pCertData->fTrustedRoot) || pCertData->fTestCert) { + return VCCRErrorTrust; + } + + m_PtrCertGetNameStringW( + pCertData->pCert, + CERT_NAME_ATTR_TYPE, + 0, + szOID_COMMON_NAME, + subjectStr, + ARRAYSIZE(subjectStr)); + + m_PtrCertGetNameStringW( + pCertData->pCert, + CERT_NAME_ATTR_TYPE, + CERT_NAME_ISSUER_FLAG, + 0, + issuerStr, + ARRAYSIZE(issuerStr)); + + if (wcscmp(subjectStr, chain[certIndex].Subject) != 0 || + wcscmp(issuerStr, chain[certIndex].Issuer) != 0) { + return VCCRErrorValidation; + } + } + + return VCCRSuccess; +} + +#define OVR_SIGNING_CONVERT_PTR(ftype, fptr, procaddr) \ + { \ + union { \ + ftype p1; \ + ModuleFunctionType p2; \ + } u; \ + u.p2 = procaddr; \ + fptr = u.p1; \ + } + +static BOOL OVR_Win32_SignCheck(FilePathCharType* fullPath, HANDLE hFile) { + WINTRUST_FILE_INFO fileData; + WINTRUST_DATA wintrustData; + GUID actionGUID = WINTRUST_ACTION_GENERIC_VERIFY_V2; + LONG resultStatus; + BOOL verified = FALSE; + HMODULE libWinTrust = LoadLibraryW(L"wintrust"); + HMODULE libCrypt32 = LoadLibraryW(L"crypt32"); + if (libWinTrust == NULL || libCrypt32 == NULL) { + return FALSE; + } + + OVR_SIGNING_CONVERT_PTR( + PtrCertGetNameStringW, + m_PtrCertGetNameStringW, + GetProcAddress(libCrypt32, "CertGetNameStringW")); + OVR_SIGNING_CONVERT_PTR( + PtrWinVerifyTrust, m_PtrWinVerifyTrust, GetProcAddress(libWinTrust, "WinVerifyTrust")); + OVR_SIGNING_CONVERT_PTR( + PtrWTHelperProvDataFromStateData, + m_PtrWTHelperProvDataFromStateData, + GetProcAddress(libWinTrust, "WTHelperProvDataFromStateData")); + OVR_SIGNING_CONVERT_PTR( + PtrWTHelperGetProvSignerFromChain, + m_PtrWTHelperGetProvSignerFromChain, + GetProcAddress(libWinTrust, "WTHelperGetProvSignerFromChain")); + + if (m_PtrCertGetNameStringW == NULL || m_PtrWinVerifyTrust == NULL || + m_PtrWTHelperProvDataFromStateData == NULL || m_PtrWTHelperGetProvSignerFromChain == NULL) { + return FALSE; + } + + if (hFile == INVALID_HANDLE_VALUE || fullPath == NULL) { + return FALSE; + } + + ZeroMemory(&fileData, sizeof(fileData)); + fileData.cbStruct = sizeof(fileData); + fileData.pcwszFilePath = fullPath; + fileData.hFile = hFile; + + ZeroMemory(&wintrustData, sizeof(wintrustData)); + wintrustData.cbStruct = sizeof(wintrustData); + wintrustData.pFile = &fileData; + wintrustData.dwUnionChoice = WTD_CHOICE_FILE; // Specify WINTRUST_FILE_INFO. + wintrustData.dwUIChoice = WTD_UI_NONE; // Do not display any UI. + wintrustData.dwUIContext = WTD_UICONTEXT_EXECUTE; // Hint that this is about app execution. + wintrustData.fdwRevocationChecks = WTD_REVOKE_NONE; + wintrustData.dwProvFlags = WTD_REVOCATION_CHECK_NONE; + wintrustData.dwStateAction = WTD_STATEACTION_VERIFY; + wintrustData.hWVTStateData = 0; + + resultStatus = m_PtrWinVerifyTrust( + (HWND)INVALID_HANDLE_VALUE, // Do not display any UI. 
+ &actionGUID, // V2 verification + &wintrustData); + + if (resultStatus == ERROR_SUCCESS && wintrustData.hWVTStateData != 0 && + wintrustData.hWVTStateData != INVALID_HANDLE_VALUE) { + CRYPT_PROVIDER_DATA* cpd = m_PtrWTHelperProvDataFromStateData(wintrustData.hWVTStateData); + if (cpd && cpd->csSigners == 1) { + CRYPT_PROVIDER_SGNR* cps = m_PtrWTHelperGetProvSignerFromChain(cpd, 0, FALSE, 0); + int chainIndex; + for (chainIndex = 0; chainIndex < CertificateChainCount; ++chainIndex) { + CertificateEntry* chain = AllowedCertificateChains[chainIndex]; + if (VCCRSuccess == ValidateCertificateContents(chain, cps)) { + verified = TRUE; + break; + } + } + } + } + + wintrustData.dwStateAction = WTD_STATEACTION_CLOSE; + + m_PtrWinVerifyTrust( + (HWND)INVALID_HANDLE_VALUE, // Do not display any UI. + &actionGUID, // V2 verification + &wintrustData); + + return verified; +} + +#endif // #if defined(_WIN32) + +static ModuleHandleType OVR_OpenLibrary(const FilePathCharType* libraryPath, ovrResult* result) { +#if defined(_WIN32) + DWORD fullPathNameLen = 0; + FilePathCharType fullPath[MAX_PATH] = {0}; + HANDLE hFilePinned = INVALID_HANDLE_VALUE; + ModuleHandleType hModule = 0; + + *result = ovrSuccess; + + fullPathNameLen = GetFullPathNameW(libraryPath, MAX_PATH, fullPath, 0); + if (fullPathNameLen <= 0 || fullPathNameLen >= MAX_PATH) { + *result = ovrError_LibPath; + return NULL; + } + + hFilePinned = CreateFileW( + fullPath, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_READONLY, 0); + + if (hFilePinned == INVALID_HANDLE_VALUE) { + *result = ovrError_LibPath; + return NULL; + } + + if (!OVR_Win32_SignCheck(fullPath, hFilePinned)) { + *result = ovrError_LibSignCheck; + CloseHandle(hFilePinned); + return NULL; + } + + hModule = LoadLibraryW(fullPath); + + CloseHandle(hFilePinned); + + if (hModule == NULL) { + *result = ovrError_LibLoad; + } + + return hModule; +#else + *result = ovrSuccess; + + // Don't bother trying to dlopen() a file that is not even there. + if (access(libraryPath, X_OK | R_OK) != 0) { + *result = ovrError_LibPath; + return NULL; + } + + dlerror(); // Clear any previous dlopen() errors + + // Use RTLD_NOW because we don't want unexpected stalls at runtime, and the library isn't very + // large. + // Use RTLD_LOCAL to avoid unilaterally exporting resolved symbols to the rest of this process. + void* lib = dlopen(libraryPath, RTLD_NOW | RTLD_LOCAL); + + if (!lib) { + fprintf(stderr, "ERROR: Can't load '%s':\n%s\n", libraryPath, dlerror()); + } + + return lib; +#endif +} + +static void OVR_CloseLibrary(ModuleHandleType hLibrary) { + if (hLibrary) { +#if defined(_WIN32) + // We may need to consider what to do in the case that the library is in an exception state. + // In a Windows C++ DLL, all global objects (including static members of classes) will be + // constructed just + // before the calling of the DllMain with DLL_PROCESS_ATTACH and they will be destroyed just + // after + // the call of the DllMain with DLL_PROCESS_DETACH. We may need to intercept DLL_PROCESS_DETACH + // and + // have special handling for the case that the DLL is broken. + FreeLibrary(hLibrary); +#else + dlclose(hLibrary); +#endif + } +} + +// Returns a valid ModuleHandleType (e.g. Windows HMODULE) or returns ModuleHandleTypeNull (e.g. +// NULL). +// The caller is required to eventually call OVR_CloseLibrary on a valid return handle. 
+// +static ModuleHandleType OVR_FindLibraryPath( + int requestedProductVersion, + int requestedMajorVersion, + FilePathCharType* libraryPath, + size_t libraryPathCapacity, + ovrResult* result) { + ModuleHandleType moduleHandle; + int printfResult; + FilePathCharType developerDir[OVR_MAX_PATH] = {'\0'}; + +#if defined(_MSC_VER) +#if defined(_WIN64) + const char* pBitDepth = "64"; +#else + const char* pBitDepth = "32"; +#endif +#elif defined(__APPLE__) +// For Apple platforms we are using a Universal Binary LibOVRRT dylib which has both 32 and 64 in +// it. +#else // Other Unix. +#if defined(__x86_64__) + const char* pBitDepth = "64"; +#else + const char* pBitDepth = "32"; +#endif +#endif + + (void)requestedProductVersion; + + *result = ovrError_LibLoad; + moduleHandle = ModuleHandleTypeNull; + if (libraryPathCapacity) + libraryPath[0] = '\0'; + +// Note: OVR_ENABLE_DEVELOPER_SEARCH is deprecated in favor of the simpler LIBOVR_DLL_DIR, as the +// edge +// case uses of the former created some complications that may be best solved by simply using a +// LIBOVR_DLL_DIR +// environment variable which the user can set in their debugger or system environment variables. +#if (defined(_MSC_VER) || defined(_WIN32)) && !defined(OVR_FILE_PATH_SEPARATOR) +#define OVR_FILE_PATH_SEPARATOR "\\" +#else +#define OVR_FILE_PATH_SEPARATOR "/" +#endif + + { + const char* pLibOvrDllDir = + getenv("LIBOVR_DLL_DIR"); // Example value: /dev/OculusSDK/Main/LibOVR/Mac/Debug/ + + if (pLibOvrDllDir) { + char developerDir8[OVR_MAX_PATH]; + size_t length = OVR_strlcpy( + developerDir8, + pLibOvrDllDir, + sizeof(developerDir8)); // If missing a trailing path separator then append one. + + if ((length > 0) && (length < sizeof(developerDir8)) && + (developerDir8[length - 1] != OVR_FILE_PATH_SEPARATOR[0])) { + length = OVR_strlcat(developerDir8, OVR_FILE_PATH_SEPARATOR, sizeof(developerDir8)); + + if (length < sizeof(developerDir8)) { +#if defined(_WIN32) + size_t i; + for (i = 0; i <= length; ++i) // ASCII conversion of 8 to 16 bit text. + developerDir[i] = (FilePathCharType)(uint8_t)developerDir8[i]; +#else + OVR_strlcpy(developerDir, developerDir8, sizeof(developerDir)); +#endif + } + } + } + } + +// Support checking for a developer library location override via the OVR_SDK_ROOT environment +// variable. +// This pathway is deprecated in favor of using LIBOVR_DLL_DIR instead. +#if defined(OVR_ENABLE_DEVELOPER_SEARCH) + if (!developerDir[0]) // If not already set by LIBOVR_DLL_PATH... + { + // __FILE__ maps to /LibOVR/Src/OVR_CAPIShim.c + char sdkRoot[OVR_MAX_PATH]; + char* pLibOVR; + size_t i; + + // We assume that __FILE__ returns a full path, which isn't the case for some compilers. + // Need to compile with /FC under VC++ for __FILE__ to expand to the full file path. + // NOTE: This needs to be fixed on Mac. __FILE__ is not expanded to full path under clang. + OVR_strlcpy(sdkRoot, __FILE__, sizeof(sdkRoot)); + for (i = 0; sdkRoot[i]; ++i) + sdkRoot[i] = (char)tolower(sdkRoot[i]); // Microsoft doesn't maintain case. + pLibOVR = strstr(sdkRoot, "libovr"); + if (pLibOVR && (pLibOVR > sdkRoot)) + pLibOVR[-1] = '\0'; + else + sdkRoot[0] = '\0'; + + if (sdkRoot[0]) { + // We want to use a developer version of the library only if the application is also being + // executed from + // a developer location. 
Ideally we would do this by checking that the relative path from the + // executable to + // the shared library is the same at runtime as it was when the executable was first built, + // but we don't have + // an easy way to do that from here and it would require some runtime help from the + // application code. + // Instead we verify that the application is simply in the same developer tree that was was + // when built. + // We could put in some additional logic to make it very likely to know if the EXE is in its + // original location. + FilePathCharType modulePath[OVR_MAX_PATH]; + const ovrBool pathMatch = OVR_GetCurrentModuleDirectory(modulePath, OVR_MAX_PATH, ovrTrue) && + (OVR_PathStartsWith(modulePath, sdkRoot) == ovrTrue); + if (pathMatch == ovrFalse) { + sdkRoot[0] = '\0'; // The application module is not in the developer tree, so don't try to + // use the developer shared library. + } + } + + if (sdkRoot[0]) { + +#ifndef CONFIG_VARIANT +#define CONFIG_VARIANT +#endif + +#if defined(OVR_BUILD_DEBUG) + const char* pConfigDirName = "Debug" CONFIG_VARIANT; +#else + const char* pConfigDirName = "Release" CONFIG_VARIANT; +#endif + +#undef CONFIG_VARIANT + +#if defined(_MSC_VER) +#if defined(_WIN64) + const char* pArchDirName = "x64"; +#else + const char* pArchDirName = "Win32"; +#endif +#else +#if defined(__x86_64__) + const char* pArchDirName = "x86_64"; +#else + const char* pArchDirName = "i386"; +#endif +#endif + +#if defined(_MSC_VER) && (_MSC_VER == 1600) + const char* pCompilerVersion = "VS2010"; +#elif defined(_MSC_VER) && (_MSC_VER == 1700) + const char* pCompilerVersion = "VS2012"; +#elif defined(_MSC_VER) && (_MSC_VER == 1800) + const char* pCompilerVersion = "VS2013"; +#elif defined(_MSC_VER) && (_MSC_VER >= 1900) + const char* pCompilerVersion = "VS2015"; +#endif + +#if defined(_WIN32) + int count = swprintf_s( + developerDir, + OVR_MAX_PATH, + L"%hs\\LibOVR\\Lib\\Windows\\%hs\\%hs\\%hs\\", + sdkRoot, + pArchDirName, + pConfigDirName, + pCompilerVersion); +#elif defined(__APPLE__) + // Apple/XCode doesn't let you specify an arch in build paths, which is OK if we build a + // universal binary. + (void)pArchDirName; + int count = + snprintf(developerDir, OVR_MAX_PATH, "%s/LibOVR/Lib/Mac/%s/", sdkRoot, pConfigDirName); +#else + int count = snprintf( + developerDir, + OVR_MAX_PATH, + "%s/LibOVR/Lib/Linux/%s/%s/", + sdkRoot, + pArchDirName, + pConfigDirName); +#endif + + if ((count < 0) || + (count >= + (int)OVR_MAX_PATH)) // If there was an error or capacity overflow... clear the string. + { + developerDir[0] = '\0'; + } + } + } +#endif // OVR_ENABLE_DEVELOPER_SEARCH + + { +#if !defined(_WIN32) + FilePathCharType cwDir[OVR_MAX_PATH]; // Will be filled in below. + FilePathCharType appDir[OVR_MAX_PATH]; +#endif + size_t i; + +#if defined(_WIN32) + // On Windows, only search the developer directory and the usual path + const FilePathCharType* directoryArray[2]; + directoryArray[0] = developerDir; // Developer directory. + directoryArray[1] = L""; // No directory, which causes Windows to use the standard search + // strategy to find the DLL. + +#elif defined(__APPLE__) + // https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html + + FilePathCharType homeDir[OVR_MAX_PATH]; + FilePathCharType homeFrameworkDir[OVR_MAX_PATH]; + const FilePathCharType* directoryArray[5]; + size_t homeDirLength = 0; + + const char* pHome = getenv("HOME"); // Try getting the HOME environment variable. 
+
+    if (pHome) {
+      homeDirLength = OVR_strlcpy(homeDir, pHome, sizeof(homeDir));
+    } else {
+      // https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man3/getpwuid_r.3.html
+      const long pwBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX);
+
+      if (pwBufferSize != -1) {
+        char pwBuffer[pwBufferSize];
+        struct passwd pw;
+        struct passwd* pwResult = NULL;
+
+        if ((getpwuid_r(getuid(), &pw, pwBuffer, pwBufferSize, &pwResult) == 0) && pwResult)
+          homeDirLength = OVR_strlcpy(homeDir, pw.pw_dir, sizeof(homeDir));
+      }
+    }
+
+    if (homeDirLength) {
+      if (homeDir[homeDirLength - 1] == '/')
+        homeDir[homeDirLength - 1] = '\0';
+      OVR_strlcpy(homeFrameworkDir, homeDir, sizeof(homeFrameworkDir));
+      OVR_strlcat(homeFrameworkDir, "/Library/Frameworks/", sizeof(homeFrameworkDir));
+    } else {
+      homeFrameworkDir[0] = '\0';
+    }
+
+    directoryArray[0] = cwDir;
+    directoryArray[1] = appDir;
+    directoryArray[2] = homeFrameworkDir; // ~/Library/Frameworks/
+    directoryArray[3] = "/Library/Frameworks/"; // DYLD_FALLBACK_FRAMEWORK_PATH
+    directoryArray[4] = developerDir; // Developer directory.
+
+#else
+#define STR1(x) #x
+#define STR(x) STR1(x)
+#ifdef LIBDIR
+#define TEST_LIB_DIR STR(LIBDIR) "/"
+#else
+#define TEST_LIB_DIR appDir
+#endif
+
+    const FilePathCharType* directoryArray[5];
+    directoryArray[0] = cwDir;
+    directoryArray[1] = TEST_LIB_DIR; // Directory specified by LIBDIR if defined.
+    directoryArray[2] = developerDir; // Developer directory.
+    directoryArray[3] = "/usr/local/lib/";
+    directoryArray[4] = "/usr/lib/";
+#endif
+
+#if !defined(_WIN32)
+    OVR_GetCurrentWorkingDirectory(cwDir, sizeof(cwDir) / sizeof(cwDir[0]));
+    OVR_GetCurrentApplicationDirectory(appDir, sizeof(appDir) / sizeof(appDir[0]), ovrTrue, NULL);
+#endif
+
+    // Versioned file expectations.
+    //     Windows: LibOVRRT<BIT_DEPTH>_<PRODUCT_VERSION>_<MAJOR_VERSION>.dll
+    //         Example: LibOVRRT64_1_1.dll -- LibOVRRT 64 bit, product 1, major version 1,
+    //         minor/patch/build numbers unspecified in the name.
+    //     Mac: LibOVRRT_<PRODUCT_VERSION>.framework/Versions/<MAJOR_VERSION>/LibOVRRT_<PRODUCT_VERSION>
+    //         We are not presently using the .framework bundle's Current directory to hold the
+    //         version number. This may change.
+    //     Linux: libOVRRT<BIT_DEPTH>_<PRODUCT_VERSION>.so.<MAJOR_VERSION>
+    //         The file on disk may contain a minor version number, but a symlink is used to map
+    //         this major-only version to it.
+
+    // Since we are manually loading the LibOVR dynamic library, we need to look in various
+    // locations for a file that matches our requirements. The functionality required is somewhat
+    // similar to the operating system's dynamic loader functionality. Each OS has some differences
+    // in how this is handled. Future versions of this may iterate over all libOVRRT.so.* files in
+    // the directory and use the one that matches our requirements.
+    //
+    // We need to look for a library that matches the product version and major version of the
+    // caller's request, and that library needs to support a minor version that is >= the requested
+    // minor version. Currently we don't test the minor version here, as the library is named based
+    // only on the product and major version. Currently the minor version test is handled via the
+    // initialization of the library, and initialization fails if the minor version cannot be
+    // supported by the library. The reason this is done during initialization is that the library
+    // can at runtime support multiple minor versions based on the user's request.
+    // To the external user, all that matters is that they call ovr_Initialize with a requested
+    // version and it succeeds or fails.
+    //
+    // The product version is something that is at a higher level than the major version, and is
+    // not something that's always seen in libraries (an example is the well-known LibXml2 library,
+    // in which the 2 is essentially the product version).
+
+    for (i = 0; i < sizeof(directoryArray) / sizeof(directoryArray[0]); ++i) {
+#if defined(_WIN32)
+      printfResult = swprintf(
+          libraryPath,
+          libraryPathCapacity,
+          L"%lsLibOVRRT%hs_%d.dll",
+          directoryArray[i],
+          pBitDepth,
+          requestedMajorVersion);
+
+      if (*directoryArray[i] == 0) {
+        int k;
+        FilePathCharType foundPath[MAX_PATH] = {0};
+        DWORD searchResult = SearchPathW(NULL, libraryPath, NULL, MAX_PATH, foundPath, NULL);
+        if (searchResult <= 0 || searchResult >= libraryPathCapacity) {
+          continue;
+        }
+        foundPath[MAX_PATH - 1] = 0;
+        for (k = 0; k < MAX_PATH; ++k) {
+          libraryPath[k] = foundPath[k];
+        }
+      }
+
+#elif defined(__APPLE__)
+      // https://developer.apple.com/library/mac/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/VersionInformation.html
+      // Macintosh application bundles have the option of embedding dependent frameworks within the
+      // application bundle itself. A problem with that is that it doesn't support vendor-supplied
+      // updates to the framework.
+      printfResult =
+          snprintf(libraryPath, libraryPathCapacity, "%sLibOVRRT.dylib", directoryArray[i]);
+
+#else // Unix
+      // Applications that depend on the OS (e.g. ld-linux / ldd) can rely on the library being in
+      // a common location such as /usr/lib, or can rely on the -rpath linker option to embed a
+      // path for the OS to check for the library, or can rely on the LD_LIBRARY_PATH environment
+      // variable being set. It's generally not recommended that applications depend on
+      // LD_LIBRARY_PATH being globally modified, partly due to potential security issues.
+      // Currently we check the current application directory, current working directory, and then
+      // /usr/lib and possibly others.
+      printfResult = snprintf(
+          libraryPath,
+          libraryPathCapacity,
+          "%slibOVRRT%s.so.%d",
+          directoryArray[i],
+          pBitDepth,
+          requestedMajorVersion);
+#endif
+
+      if ((printfResult >= 0) && (printfResult < (int)libraryPathCapacity)) {
+        moduleHandle = OVR_OpenLibrary(libraryPath, result);
+        if (moduleHandle != ModuleHandleTypeNull)
+          return moduleHandle;
+      }
+    }
+  }
+
+  return moduleHandle;
+}
+
+//-----------------------------------------------------------------------------------
+// ***** hLibOVR
+//
+// Global handle to the LibOVR shared library.
+//
+static ModuleHandleType hLibOVR = NULL;
+
+// This function is currently unsupported.
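As an aside, before the shared-library bookkeeping that follows, it may help to see what the format strings in the search loop above actually produce. The snippet below is a standalone illustration; the buffer, the printf calls, and the hard-coded "64" and major-version values are mine, not shim code:

#include <stdio.h>

int main(void) {
  const char* pBitDepth = "64";  /* chosen by the compiler checks earlier in OVR_FindLibraryPath */
  int requestedMajorVersion = 1; /* OVR_MAJOR_VERSION for this SDK generation */
  char name[64];

  /* Windows pattern, L"%lsLibOVRRT%hs_%d.dll", with the directory prefix omitted. */
  snprintf(name, sizeof(name), "LibOVRRT%s_%d.dll", pBitDepth, requestedMajorVersion);
  printf("%s\n", name); /* LibOVRRT64_1.dll */

  /* Linux pattern, "%slibOVRRT%s.so.%d". */
  snprintf(name, sizeof(name), "libOVRRT%s.so.%d", pBitDepth, requestedMajorVersion);
  printf("%s\n", name); /* libOVRRT64.so.1 */

  /* macOS simply loads the unversioned framework binary, "%sLibOVRRT.dylib". */
  return 0;
}

Each candidate directory in directoryArray is prepended to these names in turn and handed to OVR_OpenLibrary until one of them loads.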
+ModuleHandleType ovr_GetLibOVRRTHandle() { + return hLibOVR; +} + +//----------------------------------------------------------------------------------- +// ***** Function declarations +// + +//----------------------------------------------------------------------------------- +// ***** OVR_DECLARE_IMPORT +// +// Creates a pointer and loader value union for each entry in OVR_LIST_APIS() +// + +#define OVR_DECLARE_IMPORT(ReturnValue, FunctionName, OptionalVersion, Arguments) \ + union { \ + ReturnValue(OVR_CDECL* Ptr) Arguments; \ + ModuleFunctionType Symbol; \ + } FunctionName; + +#define OVR_IGNORE_IMPORT(ReturnValue, FunctionName, OptionalVersion, Arguments) + +//----------------------------------------------------------------------------------- +// ***** API - a structure with each API entrypoint as a FunctionName.Ptr and FunctionName.Symbol +// union +// + +static struct { OVR_LIST_APIS(OVR_DECLARE_IMPORT, OVR_IGNORE_IMPORT) } API = {{NULL}}; + +static void OVR_UnloadSharedLibrary() { + memset(&API, 0, sizeof(API)); + if (hLibOVR) + OVR_CloseLibrary(hLibOVR); + hLibOVR = NULL; +} + +static ovrResult OVR_LoadSharedLibrary(int requestedProductVersion, int requestedMajorVersion) { + FilePathCharType filePath[OVR_MAX_PATH]; + const char* SymbolName = NULL; + ovrResult result = ovrSuccess; + + if (hLibOVR) + return result; + + hLibOVR = OVR_FindLibraryPath( + requestedProductVersion, + requestedMajorVersion, + filePath, + sizeof(filePath) / sizeof(filePath[0]), + &result); + + if (!hLibOVR) + return result; + + // Zero the API table just to be paranoid + memset(&API, 0, sizeof(API)); + +// Load the current API entrypoint using the catenated FunctionName and OptionalVersion +#define OVR_GETFUNCTION(ReturnValue, FunctionName, OptionalVersion, Arguments) \ + SymbolName = #FunctionName #OptionalVersion; \ + API.FunctionName.Symbol = OVR_DLSYM(hLibOVR, SymbolName); \ + if (!API.FunctionName.Symbol) { \ + fprintf(stderr, "Unable to locate symbol: %s\n", SymbolName); \ + result = ovrError_LibSymbols; \ + goto FailedToLoadSymbol; \ + } + + OVR_LIST_APIS(OVR_GETFUNCTION, OVR_IGNORE_IMPORT) + +#undef OVR_GETFUNCTION + + return result; + +FailedToLoadSymbol: + // Check SymbolName for the name of the API which failed to load + OVR_UnloadSharedLibrary(); + return result; +} + +// These defaults are also in CAPI.cpp +static const ovrInitParams DefaultParams = { + ovrInit_RequestVersion, // Flags + OVR_MINOR_VERSION, // RequestedMinorVersion + 0, // LogCallback + 0, // UserData + 0, // ConnectionTimeoutSeconds + OVR_ON64("") // pad0 +}; + +// Don't put this on the heap +static ovrErrorInfo LastInitializeErrorInfo = {ovrError_NotInitialized, + "ovr_Initialize never called"}; + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* inputParams) { + ovrResult result; + ovrInitParams params; + + typedef void(OVR_CDECL * ovr_ReportClientInfoType)( + unsigned int compilerVersion, + int productVersion, + int majorVersion, + int minorVersion, + int patchVersion, + int buildNumber); + ovr_ReportClientInfoType reportClientInfo; + + // Do something with our version signature hash to prevent + // it from being optimized out. In this case, compute + // a cheap CRC. + uint8_t crc = 0; + size_t i; + + for (i = 0; i < (sizeof(OculusSDKUniqueIdentifier) - 3); + ++i) // Minus 3 because we have trailing OVR_MAJOR_VERSION, OVR_MINOR_VERSION, + // OVR_PATCH_VERSION which vary per version. 
+ { + crc ^= OculusSDKUniqueIdentifier[i]; + } + + assert(crc == OculusSDKUniqueIdentifierXORResult); + if (crc != OculusSDKUniqueIdentifierXORResult) { + return ovrError_Initialize; + } + + if (!inputParams) { + params = DefaultParams; + } else { + params = *inputParams; + + // If not requesting a particular minor version, + if (!(params.Flags & ovrInit_RequestVersion)) { + // Enable requesting the default minor version. + params.Flags |= ovrInit_RequestVersion; + params.RequestedMinorVersion = OVR_MINOR_VERSION; + } + } + + // Clear non-writable bits provided by client code. + params.Flags &= ovrinit_WritableBits; + + + + // Error out if the requested minor version is less than our lowest deemed compatible version + // denoted by OVR_MIN_REQUESTABLE_MINOR_VERSION. + // Note: This code has to be in the shim as we want to enforce usage of the new API versions for + // applications being recompiled while maintaining backwards compatibility with older apps + if (params.RequestedMinorVersion < OVR_MIN_REQUESTABLE_MINOR_VERSION) { + // Requested LibOVRRT version too low + result = ovrError_LibVersion; + return result; + } + + // By design we ignore the build version in the library search. + result = OVR_LoadSharedLibrary(OVR_PRODUCT_VERSION, OVR_MAJOR_VERSION); + if (result != ovrSuccess) + return result; + + result = API.ovr_Initialize.Ptr(¶ms); + + if (result != ovrSuccess) { + // Stash the last initialization error for the shim to return if + // ovr_GetLastErrorInfo is called after we unload the dll below + if (API.ovr_GetLastErrorInfo.Ptr) { + API.ovr_GetLastErrorInfo.Ptr(&LastInitializeErrorInfo); + } + OVR_UnloadSharedLibrary(); + } + + reportClientInfo = + (ovr_ReportClientInfoType)(uintptr_t)OVR_DLSYM(hLibOVR, "ovr_ReportClientInfo"); + + if (reportClientInfo) { + unsigned int mscFullVer = 0; +#if defined(_MSC_FULL_VER) + mscFullVer = _MSC_FULL_VER; +#endif // _MSC_FULL_VER + + reportClientInfo( + mscFullVer, + OVR_PRODUCT_VERSION, + OVR_MAJOR_VERSION, + OVR_MINOR_VERSION, + OVR_PATCH_VERSION, + OVR_BUILD_NUMBER); + } + + return result; +} + +OVR_PUBLIC_FUNCTION(void) ovr_Shutdown() { + if (!API.ovr_Shutdown.Ptr) + return; + API.ovr_Shutdown.Ptr(); + OVR_UnloadSharedLibrary(); +} + +OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString() { + // We don't directly return the value of the DLL API.ovr_GetVersionString.Ptr call, + // because that call returns a pointer to memory within the DLL. If the DLL goes + // away then that pointer becomes invalid while the process may still be holding + // onto it. So we save a local copy of it which is always valid. + static char dllVersionStringLocal[32]; + const char* dllVersionString; + + if (!API.ovr_GetVersionString.Ptr) + return "(Unable to load LibOVR)"; + + dllVersionString = API.ovr_GetVersionString.Ptr(); // Guaranteed to always be valid. 
+ assert(dllVersionString != NULL); + OVR_strlcpy(dllVersionStringLocal, dllVersionString, sizeof(dllVersionStringLocal)); + + return dllVersionStringLocal; +} + +OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo) { + if (!API.ovr_GetLastErrorInfo.Ptr) { + *errorInfo = LastInitializeErrorInfo; + } else + API.ovr_GetLastErrorInfo.Ptr(errorInfo); +} + +OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session) { + if (!API.ovr_GetHmdDesc.Ptr) { + ovrHmdDesc hmdDesc; + memset(&hmdDesc, 0, sizeof(hmdDesc)); + hmdDesc.Type = ovrHmd_None; + return hmdDesc; + } + + return API.ovr_GetHmdDesc.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session) { + if (!API.ovr_GetTrackerCount.Ptr) { + return 0; + } + + return API.ovr_GetTrackerCount.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrTrackerDesc) +ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex) { + if (!API.ovr_GetTrackerDesc.Ptr) { + ovrTrackerDesc trackerDesc; + memset(&trackerDesc, 0, sizeof(trackerDesc)); + return trackerDesc; + } + + return API.ovr_GetTrackerDesc.Ptr(session, trackerDescIndex); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid) { + if (!API.ovr_Create.Ptr) + return ovrError_NotInitialized; + return API.ovr_Create.Ptr(pSession, pLuid); +} + +OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session) { + if (!API.ovr_Destroy.Ptr) + return; + API.ovr_Destroy.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus) { + if (!API.ovr_GetSessionStatus.Ptr) { + if (sessionStatus) { + sessionStatus->IsVisible = ovrFalse; + sessionStatus->HmdPresent = ovrFalse; + sessionStatus->HmdMounted = ovrFalse; + sessionStatus->ShouldQuit = ovrFalse; + sessionStatus->DisplayLost = ovrFalse; + sessionStatus->ShouldRecenter = ovrFalse; + sessionStatus->HasInputFocus = ovrFalse; + sessionStatus->OverlayPresent = ovrFalse; + sessionStatus->DepthRequested = ovrFalse; + } + + return ovrError_NotInitialized; + } + + return API.ovr_GetSessionStatus.Ptr(session, sessionStatus); +} + + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_IsExtensionSupported(ovrSession session, ovrExtensions extension, ovrBool* extensionSupported) { + if (!API.ovr_IsExtensionSupported.Ptr) + return ovrError_NotInitialized; + return API.ovr_IsExtensionSupported.Ptr(session, extension, extensionSupported); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_EnableExtension(ovrSession session, ovrExtensions extension) { + if (!API.ovr_EnableExtension.Ptr) + return ovrError_NotInitialized; + return API.ovr_EnableExtension.Ptr(session, extension); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin) { + if (!API.ovr_SetTrackingOriginType.Ptr) + return ovrError_NotInitialized; + return API.ovr_SetTrackingOriginType.Ptr(session, origin); +} + +OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session) { + if (!API.ovr_GetTrackingOriginType.Ptr) + return ovrTrackingOrigin_EyeLevel; + return API.ovr_GetTrackingOriginType.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session) { + if (!API.ovr_RecenterTrackingOrigin.Ptr) + return ovrError_NotInitialized; + return API.ovr_RecenterTrackingOrigin.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_SpecifyTrackingOrigin(ovrSession session, ovrPosef originPose) { + if (!API.ovr_SpecifyTrackingOrigin.Ptr) + return ovrError_NotInitialized; + return 
API.ovr_SpecifyTrackingOrigin.Ptr(session, originPose); +} + +OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session) { + if (!API.ovr_ClearShouldRecenterFlag.Ptr) + return; + API.ovr_ClearShouldRecenterFlag.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrTrackingState) +ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker) { + if (!API.ovr_GetTrackingState.Ptr) { + ovrTrackingState nullTrackingState; + memset(&nullTrackingState, 0, sizeof(nullTrackingState)); + return nullTrackingState; + } + + return API.ovr_GetTrackingState.Ptr(session, absTime, latencyMarker); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetDevicePoses( + ovrSession session, + ovrTrackedDeviceType* deviceTypes, + int deviceCount, + double absTime, + ovrPoseStatef* outDevicePoses) { + if (!API.ovr_GetDevicePoses.Ptr) + return ovrError_NotInitialized; + return API.ovr_GetDevicePoses.Ptr(session, deviceTypes, deviceCount, absTime, outDevicePoses); +} + +OVR_PUBLIC_FUNCTION(ovrTrackingState) +ovr_GetTrackingStateWithSensorData( + ovrSession session, + double absTime, + ovrBool latencyMarker, + ovrSensorData* sensorData) { + if (!API.ovr_GetTrackingStateWithSensorData.Ptr) { + ovrTrackingState nullTrackingState; + memset(&nullTrackingState, 0, sizeof(nullTrackingState)); + if (sensorData) + memset(&sensorData, 0, sizeof(sensorData)); + return nullTrackingState; + } + + return API.ovr_GetTrackingStateWithSensorData.Ptr(session, absTime, latencyMarker, sensorData); +} + +OVR_PUBLIC_FUNCTION(ovrTrackerPose) +ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex) { + if (!API.ovr_GetTrackerPose.Ptr) { + ovrTrackerPose nullTrackerPose; + memset(&nullTrackerPose, 0, sizeof(nullTrackerPose)); + return nullTrackerPose; + } + + return API.ovr_GetTrackerPose.Ptr(session, trackerPoseIndex); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState) { + if (!API.ovr_GetInputState.Ptr) { + if (inputState) + memset(inputState, 0, sizeof(ovrInputState)); + return ovrError_NotInitialized; + } + return API.ovr_GetInputState.Ptr(session, controllerType, inputState); +} + +OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session) { + if (!API.ovr_GetConnectedControllerTypes.Ptr) { + return 0; + } + return API.ovr_GetConnectedControllerTypes.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrTouchHapticsDesc) +ovr_GetTouchHapticsDesc(ovrSession session, ovrControllerType controllerType) { + if (!API.ovr_GetTouchHapticsDesc.Ptr) { + ovrTouchHapticsDesc nullDesc; + memset(&nullDesc, 0, sizeof(nullDesc)); + return nullDesc; + } + + return API.ovr_GetTouchHapticsDesc.Ptr(session, controllerType); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetControllerVibration( + ovrSession session, + ovrControllerType controllerType, + float frequency, + float amplitude) { + if (!API.ovr_SetControllerVibration.Ptr) + return ovrError_NotInitialized; + + return API.ovr_SetControllerVibration.Ptr(session, controllerType, frequency, amplitude); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SubmitControllerVibration( + ovrSession session, + ovrControllerType controllerType, + const ovrHapticsBuffer* buffer) { + if (!API.ovr_SubmitControllerVibration.Ptr) + return ovrError_NotInitialized; + + return API.ovr_SubmitControllerVibration.Ptr(session, controllerType, buffer); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetControllerVibrationState( + ovrSession session, + ovrControllerType controllerType, + ovrHapticsPlaybackState* 
outState) { + if (!API.ovr_GetControllerVibrationState.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetControllerVibrationState.Ptr(session, controllerType, outState); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_TestBoundary( + ovrSession session, + ovrTrackedDeviceType deviceBitmask, + ovrBoundaryType singleBoundaryType, + ovrBoundaryTestResult* outTestResult) { + if (!API.ovr_TestBoundary.Ptr) + return ovrError_NotInitialized; + + return API.ovr_TestBoundary.Ptr(session, deviceBitmask, singleBoundaryType, outTestResult); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_TestBoundaryPoint( + ovrSession session, + const ovrVector3f* point, + ovrBoundaryType singleBoundaryType, + ovrBoundaryTestResult* outTestResult) { + if (!API.ovr_TestBoundaryPoint.Ptr) + return ovrError_NotInitialized; + + return API.ovr_TestBoundaryPoint.Ptr(session, point, singleBoundaryType, outTestResult); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SetBoundaryLookAndFeel(ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel) { + if (!API.ovr_SetBoundaryLookAndFeel.Ptr) + return ovrError_NotInitialized; + + return API.ovr_SetBoundaryLookAndFeel.Ptr(session, lookAndFeel); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetBoundaryLookAndFeel(ovrSession session) { + if (!API.ovr_ResetBoundaryLookAndFeel.Ptr) + return ovrError_NotInitialized; + + return API.ovr_ResetBoundaryLookAndFeel.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetBoundaryGeometry( + ovrSession session, + ovrBoundaryType singleBoundaryType, + ovrVector3f* outFloorPoints, + int* outFloorPointsCount) { + if (!API.ovr_GetBoundaryGeometry.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetBoundaryGeometry.Ptr( + session, singleBoundaryType, outFloorPoints, outFloorPointsCount); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetBoundaryDimensions( + ovrSession session, + ovrBoundaryType singleBoundaryType, + ovrVector3f* outDimensions) { + if (!API.ovr_GetBoundaryDimensions.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetBoundaryDimensions.Ptr(session, singleBoundaryType, outDimensions); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetBoundaryVisible(ovrSession session, ovrBool* outIsVisible) { + if (!API.ovr_GetBoundaryVisible.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetBoundaryVisible.Ptr(session, outIsVisible); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_RequestBoundaryVisible(ovrSession session, ovrBool visible) { + if (!API.ovr_RequestBoundaryVisible.Ptr) + return ovrError_NotInitialized; + + return API.ovr_RequestBoundaryVisible.Ptr(session, visible); +} + +OVR_PUBLIC_FUNCTION(ovrSizei) +ovr_GetFovTextureSize( + ovrSession session, + ovrEyeType eye, + ovrFovPort fov, + float pixelsPerDisplayPixel) { + if (!API.ovr_GetFovTextureSize.Ptr) { + ovrSizei nullSize; + memset(&nullSize, 0, sizeof(nullSize)); + return nullSize; + } + + return API.ovr_GetFovTextureSize.Ptr(session, eye, fov, pixelsPerDisplayPixel); +} + +#if defined(_WIN32) +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* outTextureSet) { + if (!API.ovr_CreateTextureSwapChainDX.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateTextureSwapChainDX.Ptr(session, d3dPtr, desc, outTextureSet); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* outMirrorTexture) { + if (!API.ovr_CreateMirrorTextureDX.Ptr) + return 
ovrError_NotInitialized; + + return API.ovr_CreateMirrorTextureDX.Ptr(session, d3dPtr, desc, outMirrorTexture); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureWithOptionsDX( + ovrSession session, + IUnknown* d3dPtr, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* outMirrorTexture) { + if (!API.ovr_CreateMirrorTextureWithOptionsDX.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateMirrorTextureWithOptionsDX.Ptr(session, d3dPtr, desc, outMirrorTexture); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferDX( + ovrSession session, + ovrTextureSwapChain chain, + int index, + IID iid, + void** ppObject) { + if (!API.ovr_GetTextureSwapChainBufferDX.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetTextureSwapChainBufferDX.Ptr(session, chain, index, iid, ppObject); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetMirrorTextureBufferDX( + ovrSession session, + ovrMirrorTexture mirror, + IID iid, + void** ppObject) { + if (!API.ovr_GetMirrorTextureBufferDX.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetMirrorTextureBufferDX.Ptr(session, mirror, iid, ppObject); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(unsigned int* deviceOutId) { + if (!API.ovr_GetAudioDeviceOutWaveId.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceOutWaveId.Ptr(deviceOutId); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(unsigned int* deviceInId) { + if (!API.ovr_GetAudioDeviceInWaveId.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceInWaveId.Ptr(deviceInId); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuidStr(WCHAR* deviceOutStrBuffer) { + if (!API.ovr_GetAudioDeviceOutGuidStr.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceOutGuidStr.Ptr(deviceOutStrBuffer); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid) { + if (!API.ovr_GetAudioDeviceOutGuid.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceOutGuid.Ptr(deviceOutGuid); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuidStr(WCHAR* deviceInStrBuffer) { + if (!API.ovr_GetAudioDeviceInGuidStr.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceInGuidStr.Ptr(deviceInStrBuffer); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid) { + if (!API.ovr_GetAudioDeviceInGuid.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetAudioDeviceInGuid.Ptr(deviceInGuid); +} + +#endif + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainGL( + ovrSession session, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* outTextureSet) { + if (!API.ovr_CreateTextureSwapChainGL.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateTextureSwapChainGL.Ptr(session, desc, outTextureSet); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureGL( + ovrSession session, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* outMirrorTexture) { + if (!API.ovr_CreateMirrorTextureGL.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateMirrorTextureGL.Ptr(session, desc, outMirrorTexture); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureWithOptionsGL( + ovrSession session, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* outMirrorTexture) { + if (!API.ovr_CreateMirrorTextureWithOptionsGL.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateMirrorTextureWithOptionsGL.Ptr(session, desc, outMirrorTexture); +} + 
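The OpenGL entry points in this group (ovr_CreateTextureSwapChainGL above, plus the buffer, length, commit, and destroy calls that follow) cover the whole swap-chain lifecycle. Below is a rough sketch of how an application might drive them once ovr_Initialize and ovr_Create have succeeded; the ovrTextureSwapChainDesc field values are my assumption of a typical configuration (check OVR_CAPI.h in this SDK for the exact layout and formats), and error handling is trimmed to early returns:

#include "OVR_CAPI_GL.h"

/* Illustrative only: create a GL swap chain sized for one eye and fetch its GL texture ids. */
static ovrResult CreateEyeSwapChainGL(ovrSession session, int width, int height,
                                      ovrTextureSwapChain* outChain) {
  ovrTextureSwapChainDesc desc = {0};
  desc.Type = ovrTexture_2D;
  desc.ArraySize = 1;
  desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
  desc.Width = width;
  desc.Height = height;
  desc.MipLevels = 1;
  desc.SampleCount = 1;
  desc.StaticImage = ovrFalse;

  ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, outChain);
  if (OVR_FAILURE(result))
    return result; /* e.g. ovrError_NotInitialized if the runtime DLL was never loaded. */

  int length = 0;
  result = ovr_GetTextureSwapChainLength(session, *outChain, &length);
  if (OVR_FAILURE(result))
    return result;

  for (int i = 0; i < length; ++i) {
    unsigned int texId = 0;
    ovr_GetTextureSwapChainBufferGL(session, *outChain, i, &texId);
    /* Attach texId to a framebuffer object, set filtering, etc. (application specific). */
  }
  return ovrSuccess;
}

Per frame the application renders into the buffer reported by ovr_GetTextureSwapChainCurrentIndex, calls ovr_CommitTextureSwapChain, and references the chain from a layer passed to ovr_EndFrame or ovr_SubmitFrame; ovr_DestroyTextureSwapChain releases it.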
+OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferGL( + ovrSession session, + ovrTextureSwapChain chain, + int index, + unsigned int* texId) { + if (!API.ovr_GetTextureSwapChainBufferGL.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetTextureSwapChainBufferGL.Ptr(session, chain, index, texId); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetMirrorTextureBufferGL(ovrSession session, ovrMirrorTexture mirror, unsigned int* texId) { + if (!API.ovr_GetMirrorTextureBufferGL.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetMirrorTextureBufferGL.Ptr(session, mirror, texId); +} + +#if !defined(OSX_UNIMPLEMENTED) +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetInstanceExtensionsVk( + ovrGraphicsLuid luid, + char* extensionNames, + uint32_t* inoutExtensionNamesSize) { + if (!API.ovr_GetInstanceExtensionsVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetInstanceExtensionsVk.Ptr(luid, extensionNames, inoutExtensionNamesSize); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetDeviceExtensionsVk( + ovrGraphicsLuid luid, + char* extensionNames, + uint32_t* inoutExtensionNamesSize) { + if (!API.ovr_GetDeviceExtensionsVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetDeviceExtensionsVk.Ptr(luid, extensionNames, inoutExtensionNamesSize); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetSessionPhysicalDeviceVk( + ovrSession session, + ovrGraphicsLuid luid, + VkInstance instance, + VkPhysicalDevice* out_physicalDevice) { + if (!API.ovr_GetSessionPhysicalDeviceVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetSessionPhysicalDeviceVk.Ptr(session, luid, instance, out_physicalDevice); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetSynchronizationQueueVk(ovrSession session, VkQueue queue) { + if (!API.ovr_SetSynchronizationQueueVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_SetSynchronizationQueueVk.Ptr(session, queue); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateTextureSwapChainVk( + ovrSession session, + VkDevice device, + const ovrTextureSwapChainDesc* desc, + ovrTextureSwapChain* out_TextureSwapChain) { + if (!API.ovr_CreateTextureSwapChainVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateTextureSwapChainVk.Ptr(session, device, desc, out_TextureSwapChain); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainBufferVk( + ovrSession session, + ovrTextureSwapChain chain, + int index, + VkImage* out_Image) { + if (!API.ovr_GetTextureSwapChainBufferVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetTextureSwapChainBufferVk.Ptr(session, chain, index, out_Image); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CreateMirrorTextureWithOptionsVk( + ovrSession session, + VkDevice device, + const ovrMirrorTextureDesc* desc, + ovrMirrorTexture* out_MirrorTexture) { + if (!API.ovr_CreateMirrorTextureWithOptionsVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CreateMirrorTextureWithOptionsVk.Ptr(session, device, desc, out_MirrorTexture); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetMirrorTextureBufferVk( + ovrSession session, + ovrMirrorTexture mirrorTexture, + VkImage* out_Image) { + if (!API.ovr_GetMirrorTextureBufferVk.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetMirrorTextureBufferVk.Ptr(session, mirrorTexture, out_Image); +} +#endif // OSX_UNIMPLEMENTED + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* length) { + if (!API.ovr_GetTextureSwapChainLength.Ptr) + return ovrError_NotInitialized; + + return 
API.ovr_GetTextureSwapChainLength.Ptr(session, chain, length); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainCurrentIndex( + ovrSession session, + ovrTextureSwapChain chain, + int* currentIndex) { + if (!API.ovr_GetTextureSwapChainCurrentIndex.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetTextureSwapChainCurrentIndex.Ptr(session, chain, currentIndex); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetTextureSwapChainDesc( + ovrSession session, + ovrTextureSwapChain chain, + ovrTextureSwapChainDesc* desc) { + if (!API.ovr_GetTextureSwapChainDesc.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetTextureSwapChainDesc.Ptr(session, chain, desc); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain) { + if (!API.ovr_CommitTextureSwapChain.Ptr) + return ovrError_NotInitialized; + + return API.ovr_CommitTextureSwapChain.Ptr(session, chain); +} + +OVR_PUBLIC_FUNCTION(void) +ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain) { + if (!API.ovr_DestroyTextureSwapChain.Ptr) + return; + + API.ovr_DestroyTextureSwapChain.Ptr(session, chain); +} + +OVR_PUBLIC_FUNCTION(void) +ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture) { + if (!API.ovr_DestroyMirrorTexture.Ptr) + return; + + API.ovr_DestroyMirrorTexture.Ptr(session, mirrorTexture); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_WaitToBeginFrame(ovrSession session, long long frameIndex) { + if (!API.ovr_WaitToBeginFrame.Ptr) + return ovrError_NotInitialized; + + return API.ovr_WaitToBeginFrame.Ptr(session, frameIndex); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_BeginFrame(ovrSession session, long long frameIndex) { + if (!API.ovr_BeginFrame.Ptr) + return ovrError_NotInitialized; + + return API.ovr_BeginFrame.Ptr(session, frameIndex); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_EndFrame( + ovrSession session, + long long frameIndex, + const ovrViewScaleDesc* viewScaleDesc, + ovrLayerHeader const* const* layerPtrList, + unsigned int layerCount) { + if (!API.ovr_EndFrame.Ptr) + return ovrError_NotInitialized; + + return API.ovr_EndFrame.Ptr(session, frameIndex, viewScaleDesc, layerPtrList, layerCount); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_SubmitFrame( + ovrSession session, + long long frameIndex, + const ovrViewScaleDesc* viewScaleDesc, + ovrLayerHeader const* const* layerPtrList, + unsigned int layerCount) { + if (!API.ovr_SubmitFrame.Ptr) + return ovrError_NotInitialized; + + return API.ovr_SubmitFrame.Ptr(session, frameIndex, viewScaleDesc, layerPtrList, layerCount); +} + +OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc) +ovr_GetRenderDesc(ovrSession session, ovrEyeType eyeType, ovrFovPort fov) { + if (!API.ovr_GetRenderDesc.Ptr) { + ovrEyeRenderDesc nullEyeRenderDesc; + memset(&nullEyeRenderDesc, 0, sizeof(nullEyeRenderDesc)); + return nullEyeRenderDesc; + } + return API.ovr_GetRenderDesc.Ptr(session, eyeType, fov); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetPerfStats(ovrSession session, ovrPerfStats* outPerfStats) { + if (!API.ovr_GetPerfStats.Ptr) + return ovrError_NotInitialized; + + return API.ovr_GetPerfStats.Ptr(session, outPerfStats); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_ResetPerfStats(ovrSession session) { + if (!API.ovr_ResetPerfStats.Ptr) + return ovrError_NotInitialized; + + return API.ovr_ResetPerfStats.Ptr(session); +} + +OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex) { + if (!API.ovr_GetPredictedDisplayTime.Ptr) + return 0.0; + + return 
API.ovr_GetPredictedDisplayTime.Ptr(session, frameIndex); +} + +OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds() { + if (!API.ovr_GetTimeInSeconds.Ptr) + return 0.; + return API.ovr_GetTimeInSeconds.Ptr(); +} + +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal) { + if (!API.ovr_GetBool.Ptr) + return ovrFalse; + return API.ovr_GetBool.Ptr(session, propertyName, defaultVal); +} + +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value) { + if (!API.ovr_SetBool.Ptr) + return ovrFalse; + return API.ovr_SetBool.Ptr(session, propertyName, value); +} + +OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal) { + if (!API.ovr_GetInt.Ptr) + return 0; + return API.ovr_GetInt.Ptr(session, propertyName, defaultVal); +} + +OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value) { + if (!API.ovr_SetInt.Ptr) + return ovrFalse; + return API.ovr_SetInt.Ptr(session, propertyName, value); +} + +OVR_PUBLIC_FUNCTION(float) +ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal) { + if (!API.ovr_GetFloat.Ptr) + return 0.f; + return API.ovr_GetFloat.Ptr(session, propertyName, defaultVal); +} + +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetFloat(ovrSession session, const char* propertyName, float value) { + if (!API.ovr_SetFloat.Ptr) + return ovrFalse; + return API.ovr_SetFloat.Ptr(session, propertyName, value); +} + +OVR_PUBLIC_FUNCTION(unsigned int) +ovr_GetFloatArray( + ovrSession session, + const char* propertyName, + float values[], + unsigned int arraySize) { + if (!API.ovr_GetFloatArray.Ptr) + return 0; + return API.ovr_GetFloatArray.Ptr(session, propertyName, values, arraySize); +} + +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetFloatArray( + ovrSession session, + const char* propertyName, + const float values[], + unsigned int arraySize) { + if (!API.ovr_SetFloatArray.Ptr) + return ovrFalse; + return API.ovr_SetFloatArray.Ptr(session, propertyName, values, arraySize); +} + +OVR_PUBLIC_FUNCTION(const char*) +ovr_GetString(ovrSession session, const char* propertyName, const char* defaultVal) { + if (!API.ovr_GetString.Ptr) + return "(Unable to load LibOVR)"; + return API.ovr_GetString.Ptr(session, propertyName, defaultVal); +} + +OVR_PUBLIC_FUNCTION(ovrBool) +ovr_SetString(ovrSession session, const char* propertyName, const char* value) { + if (!API.ovr_SetString.Ptr) + return ovrFalse; + return API.ovr_SetString.Ptr(session, propertyName, value); +} + +OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message) { + if (!API.ovr_TraceMessage.Ptr) + return -1; + + return API.ovr_TraceMessage.Ptr(level, message); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_IdentifyClient(const char* identity) { + if (!API.ovr_IdentifyClient.Ptr) + return ovrError_NotInitialized; + + return API.ovr_IdentifyClient.Ptr(identity); +} + +OVR_PUBLIC_FUNCTION(ovrResult) ovr_Lookup(const char* name, void** data) { + if (!API.ovr_Lookup.Ptr) + return ovrError_NotInitialized; + return API.ovr_Lookup.Ptr(name, data); +} + +OVR_PUBLIC_FUNCTION(ovrResult) +ovr_GetExternalCameras( + ovrSession session, + ovrExternalCamera* outCameras, + unsigned int* outCameraCount) { + if (!API.ovr_GetExternalCameras.Ptr) + return ovrError_NotInitialized; + if (!outCameras || !outCameraCount) + return ovrError_InvalidParameter; + + return API.ovr_GetExternalCameras.Ptr(session, outCameras, outCameraCount); +} + +OVR_PUBLIC_FUNCTION(ovrResult) 
+ovr_SetExternalCameraProperties( + ovrSession session, + const char* name, + const ovrCameraIntrinsics* const intrinsics, + const ovrCameraExtrinsics* const extrinsics) { + if (!API.ovr_SetExternalCameraProperties.Ptr) + return ovrError_NotInitialized; + if (!name || (!intrinsics && !extrinsics)) + return ovrError_InvalidParameter; + + return API.ovr_SetExternalCameraProperties.Ptr(session, name, intrinsics, extrinsics); +} +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/Shim/OVR_CAPIShim.o b/Shim/OVR_CAPIShim.o new file mode 100644 index 0000000..3d8f8c5 Binary files /dev/null and b/Shim/OVR_CAPIShim.o differ diff --git a/Shim/OVR_CAPI_Prototypes.h b/Shim/OVR_CAPI_Prototypes.h new file mode 100755 index 0000000..8765ae6 --- /dev/null +++ b/Shim/OVR_CAPI_Prototypes.h @@ -0,0 +1,148 @@ +/********************************************************************************/ /** + \file OVR_CAPI_Prototypes.h + \brief Internal CAPI prototype listing macros + \copyright Copyright 2016 Oculus VR, LLC. All Rights reserved. + ************************************************************************************/ + +#ifndef OVR_CAPI_Prototypes_h +#define OVR_CAPI_Prototypes_h + +#include "OVR_CAPI.h" + + +// +// OVR_LIST_*_APIS - apply passed in macros to a list of API entrypoints +// +// The _ macro argument is applied for all current API versions +// The X macro argument is applied for back-compat API versions +// +// The tuple passed to either macro is (ReturnType, FunctionName, OptionalVersion, ParameterList) +// + + +// clang-format off + +#define OVR_LIST_PUBLIC_APIS(_,X) \ +X(ovrBool, ovr_InitializeRenderingShimVersion, , (int requestedMinorVersion)) \ +_(ovrResult, ovr_Initialize, , (const ovrInitParams* params)) \ +_(void, ovr_Shutdown, , (void)) \ +_(const char*, ovr_GetVersionString, , (void)) \ +_(void, ovr_GetLastErrorInfo, , (ovrErrorInfo* errorInfo)) \ +_(ovrHmdDesc, ovr_GetHmdDesc, , (ovrSession session)) \ +_(unsigned int, ovr_GetTrackerCount, , (ovrSession session)) \ +_(ovrTrackerDesc, ovr_GetTrackerDesc, , (ovrSession session, unsigned int trackerDescIndex)) \ +_(ovrResult, ovr_Create, , (ovrSession* pSession, ovrGraphicsLuid* pLuid)) \ +_(void, ovr_Destroy, , (ovrSession session)) \ +_(ovrResult, ovr_GetSessionStatus, , (ovrSession session, ovrSessionStatus* sessionStatus)) \ +_(ovrResult, ovr_IsExtensionSupported, , (ovrSession session, ovrExtensions extension, ovrBool* outExtensionSupported)) \ +_(ovrResult, ovr_EnableExtension, , (ovrSession session, ovrExtensions extension)) \ +_(ovrResult, ovr_SetTrackingOriginType, , (ovrSession session, ovrTrackingOrigin origin)) \ +_(ovrTrackingOrigin, ovr_GetTrackingOriginType, , (ovrSession session)) \ +_(ovrResult, ovr_RecenterTrackingOrigin, , (ovrSession session)) \ +_(ovrResult, ovr_SpecifyTrackingOrigin, , (ovrSession session, ovrPosef originPose)) \ +_(void, ovr_ClearShouldRecenterFlag, , (ovrSession session)) \ +_(ovrTrackingState, ovr_GetTrackingState, , (ovrSession session, double absTime, ovrBool latencyMarker)) \ +_(ovrTrackerPose, ovr_GetTrackerPose, , (ovrSession session, unsigned int index)) \ +_(ovrResult, ovr_GetInputState, , (ovrSession session, ovrControllerType controllerType, ovrInputState*)) \ +_(unsigned int, ovr_GetConnectedControllerTypes, , (ovrSession session)) \ +_(ovrSizei, ovr_GetFovTextureSize, , (ovrSession session, ovrEyeType eye, ovrFovPort fov, float pixelsPerDisplayPixel)) \ +_(ovrResult, ovr_WaitToBeginFrame, , (ovrSession session, long long frameIndex)) \ +_(ovrResult, ovr_BeginFrame, , 
(ovrSession session, long long frameIndex)) \ +_(ovrResult, ovr_EndFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \ +X(ovrResult, ovr_SubmitFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDescPre117* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \ +_(ovrResult, ovr_SubmitFrame, 2, (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \ +X(ovrEyeRenderDescPre117, ovr_GetRenderDesc, , (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \ +_(ovrEyeRenderDesc, ovr_GetRenderDesc, 2, (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \ +_(double, ovr_GetPredictedDisplayTime, , (ovrSession session, long long frameIndex)) \ +_(double, ovr_GetTimeInSeconds, , (void)) \ +_(ovrBool, ovr_GetBool, , (ovrSession session, const char* propertyName, ovrBool defaultVal)) \ +_(ovrBool, ovr_SetBool, , (ovrSession session, const char* propertyName, ovrBool value)) \ +_(int, ovr_GetInt, , (ovrSession session, const char* propertyName, int defaultVal)) \ +_(ovrBool, ovr_SetInt, , (ovrSession session, const char* propertyName, int value)) \ +_(float, ovr_GetFloat, , (ovrSession session, const char* propertyName, float defaultVal)) \ +_(ovrBool, ovr_SetFloat, , (ovrSession session, const char* propertyName, float value)) \ +_(unsigned int, ovr_GetFloatArray, , (ovrSession session, const char* propertyName, float values[], unsigned int arraySize)) \ +_(ovrBool, ovr_SetFloatArray, , (ovrSession session, const char* propertyName, const float values[], unsigned int arraySize)) \ +_(const char*, ovr_GetString, , (ovrSession session, const char* propertyName, const char* defaultVal)) \ +_(ovrBool, ovr_SetString, , (ovrSession session, const char* propertyName, const char* value)) \ +_(int, ovr_TraceMessage, , (int level, const char* message)) \ +_(ovrResult, ovr_IdentifyClient, , (const char* identity)) \ +_(ovrResult, ovr_CreateTextureSwapChainGL, , (ovrSession session, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \ +_(ovrResult, ovr_CreateMirrorTextureGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \ +_(ovrResult, ovr_CreateMirrorTextureWithOptionsGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \ +_(ovrResult, ovr_GetTextureSwapChainBufferGL, , (ovrSession session, ovrTextureSwapChain chain, int index, unsigned int* texId)) \ +_(ovrResult, ovr_GetMirrorTextureBufferGL, , (ovrSession session, ovrMirrorTexture mirror, unsigned int* texId)) \ +_(ovrResult, ovr_GetTextureSwapChainLength, , (ovrSession session, ovrTextureSwapChain chain, int* length)) \ +_(ovrResult, ovr_GetTextureSwapChainCurrentIndex, , (ovrSession session, ovrTextureSwapChain chain, int* currentIndex)) \ +_(ovrResult, ovr_GetTextureSwapChainDesc, , (ovrSession session, ovrTextureSwapChain chain, ovrTextureSwapChainDesc* desc)) \ +_(ovrResult, ovr_CommitTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \ +_(void, ovr_DestroyTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \ +_(void, ovr_DestroyMirrorTexture, , (ovrSession session, ovrMirrorTexture texture)) \ +X(ovrResult, ovr_SetQueueAheadFraction, , (ovrSession session, float queueAheadFraction)) \ +_(ovrResult, ovr_Lookup, , (const char* name, void** 
data)) \ +_(ovrTouchHapticsDesc, ovr_GetTouchHapticsDesc, , (ovrSession session, ovrControllerType controllerType)) \ +_(ovrResult, ovr_SetControllerVibration, , (ovrSession session, ovrControllerType controllerType, float frequency, float amplitude)) \ +_(ovrResult, ovr_SubmitControllerVibration, , (ovrSession session, ovrControllerType controllerType, const ovrHapticsBuffer* buffer)) \ +_(ovrResult, ovr_GetControllerVibrationState, , (ovrSession session, ovrControllerType controllerType, ovrHapticsPlaybackState* outState)) \ +_(ovrResult, ovr_TestBoundary, , (ovrSession session, ovrTrackedDeviceType deviceBitmask, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \ +_(ovrResult, ovr_TestBoundaryPoint, , (ovrSession session, const ovrVector3f* point, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \ +_(ovrResult, ovr_SetBoundaryLookAndFeel, , (ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel)) \ +_(ovrResult, ovr_ResetBoundaryLookAndFeel, , (ovrSession session)) \ +_(ovrResult, ovr_GetBoundaryGeometry, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outFloorPoints, int* outFloorPointsCount)) \ +_(ovrResult, ovr_GetBoundaryDimensions, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outDimension)) \ +_(ovrResult, ovr_GetBoundaryVisible, , (ovrSession session, ovrBool* outIsVisible)) \ +_(ovrResult, ovr_RequestBoundaryVisible, , (ovrSession session, ovrBool visible)) \ +_(ovrResult, ovr_GetPerfStats, , (ovrSession session, ovrPerfStats* outPerfStats)) \ +_(ovrResult, ovr_ResetPerfStats, , (ovrSession session))\ +_(ovrResult, ovr_GetExternalCameras, , (ovrSession session, ovrExternalCamera* outCameras, unsigned int* outCameraCount))\ +_(ovrResult, ovr_SetExternalCameraProperties, , (ovrSession session, const char* name, const ovrCameraIntrinsics* const intrinsics, const ovrCameraExtrinsics* const extrinsics ))\ + +#if defined (_WIN32) +#define OVR_LIST_WIN32_APIS(_,X) \ + _(ovrResult, ovr_CreateTextureSwapChainDX, , (ovrSession session, IUnknown* d3dPtr, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \ + _(ovrResult, ovr_CreateMirrorTextureDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \ + _(ovrResult, ovr_CreateMirrorTextureWithOptionsDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \ + _(ovrResult, ovr_GetTextureSwapChainBufferDX, , (ovrSession session, ovrTextureSwapChain chain, int index, IID iid, void** ppObject)) \ + _(ovrResult, ovr_GetMirrorTextureBufferDX, , (ovrSession session, ovrMirrorTexture mirror, IID iid, void** ppObject)) \ + _(ovrResult, ovr_GetAudioDeviceOutWaveId, , (UINT* deviceOutId)) \ + _(ovrResult, ovr_GetAudioDeviceInWaveId, , (UINT* deviceInId)) \ + _(ovrResult, ovr_GetAudioDeviceOutGuidStr, , (WCHAR* deviceOutStrBuffer)) \ + _(ovrResult, ovr_GetAudioDeviceOutGuid, , (GUID* deviceOutGuid)) \ + _(ovrResult, ovr_GetAudioDeviceInGuidStr, , (WCHAR* deviceInStrBuffer)) \ + _(ovrResult, ovr_GetAudioDeviceInGuid, , (GUID* deviceInGuid)) \ + _(ovrResult, ovr_GetInstanceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \ + _(ovrResult, ovr_GetDeviceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \ + _(ovrResult, ovr_GetSessionPhysicalDeviceVk, , (ovrSession session, ovrGraphicsLuid luid, VkInstance instance, 
VkPhysicalDevice* out_physicalDevice)) \ + X(ovrResult, ovr_SetSynchonizationQueueVk, , (ovrSession session, VkQueue queue)) \ + _(ovrResult, ovr_SetSynchronizationQueueVk, , (ovrSession session, VkQueue queue)) \ + _(ovrResult, ovr_CreateTextureSwapChainVk, , (ovrSession session, VkDevice device, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* out_TextureSwapChain)) \ + _(ovrResult, ovr_GetTextureSwapChainBufferVk, , (ovrSession session, ovrTextureSwapChain chain, int index, VkImage* out_Image)) \ + _(ovrResult, ovr_CreateMirrorTextureWithOptionsVk, , (ovrSession session, VkDevice device, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* out_MirrorTexture)) \ + _(ovrResult, ovr_GetMirrorTextureBufferVk, , (ovrSession session, ovrMirrorTexture mirrorTexture, VkImage* out_Image)) +#else +#define OVR_LIST_WIN32_APIS(_,X) +#endif + +#define OVR_LIST_INTERNAL_APIS(_,X) + +// We need to forward declare the ovrSensorData type here, as it won't be in a public OVR_CAPI.h header. +struct ovrSensorData_; +typedef struct ovrSensorData_ ovrSensorData; + +#define OVR_LIST_PRIVATE_APIS(_,X) \ +_(ovrTrackingState, ovr_GetTrackingStateWithSensorData, , (ovrSession session, double absTime, ovrBool latencyMarker, ovrSensorData* sensorData)) \ +_(ovrResult, ovr_GetDevicePoses, , (ovrSession session, ovrTrackedDeviceType* deviceTypes, int deviceCount, double absTime, ovrPoseStatef* outDevicePoses)) + +// clang-format on + +// +// OVR_LIST_APIS - master list of all API entrypoints +// + +#define OVR_LIST_APIS(_, X) \ + OVR_LIST_PUBLIC_APIS(_, X) \ + OVR_LIST_WIN32_APIS(_, X) \ + OVR_LIST_INTERNAL_APIS(_, X) \ + OVR_LIST_PRIVATE_APIS(_, X) + +#endif // OVR_CAPI_Prototypes_h diff --git a/Shim/OVR_CAPI_Util.cpp b/Shim/OVR_CAPI_Util.cpp new file mode 100755 index 0000000..024c9e1 --- /dev/null +++ b/Shim/OVR_CAPI_Util.cpp @@ -0,0 +1,429 @@ +/************************************************************************************ + +PublicHeader: OVR_CAPI_Util.c +Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.3 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include +#include + +#include +#include +#include + +#if !defined(_WIN32) +#include +#endif + +#if defined(_MSC_VER) && _MSC_VER < 1800 // MSVC < 2013 +#define round(dbl) \ + (dbl) >= 0.0 ? (int)((dbl) + 0.5) \ + : (((dbl) - (double)(int)(dbl)) <= -0.5 ? (int)(dbl) : (int)((dbl)-0.5)) +#endif + + +#if defined(_MSC_VER) +#include +#pragma intrinsic(_mm_pause) +#endif + +#if defined(_WIN32) +// Prevents from defining min() and max() macro symbols. 
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+#include <windows.h>
+#endif
+
+// Used to generate projection from ovrEyeDesc::Fov
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)
+ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags) {
+  bool leftHanded = (projectionModFlags & ovrProjection_LeftHanded) > 0;
+  bool flipZ = (projectionModFlags & ovrProjection_FarLessThanNear) > 0;
+  bool farAtInfinity = (projectionModFlags & ovrProjection_FarClipAtInfinity) > 0;
+  bool isOpenGL = (projectionModFlags & ovrProjection_ClipRangeOpenGL) > 0;
+
+  // TODO: Pass in correct eye to CreateProjection if we want to support canted displays from CAPI
+  return OVR::CreateProjection(
+      leftHanded, isOpenGL, fov, OVR::StereoEye_Center, znear, zfar, flipZ, farAtInfinity);
+}
+
+OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc)
+ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f Projection, unsigned int projectionModFlags) {
+  ovrTimewarpProjectionDesc res;
+  res.Projection22 = Projection.M[2][2];
+  res.Projection23 = Projection.M[2][3];
+  res.Projection32 = Projection.M[3][2];
+
+  if ((res.Projection32 != 1.0f) && (res.Projection32 != -1.0f)) {
+    // This is a very strange projection matrix, and probably won't work.
+    // If you need it to work, please contact Oculus and let us know your usage scenario.
+  }
+
+  if ((projectionModFlags & ovrProjection_ClipRangeOpenGL) != 0) {
+    // Internally we use the D3D range of [0,+w] not the OGL one of [-w,+w], so we need to convert
+    // one to the other.
+    // Note that the values in the depth buffer, and the actual linear depth we want is the same for
+    // both APIs,
+    // the difference is purely in the values inside the projection matrix.
+
+    // D3D does this:
+    // depthBuffer = ( ProjD3D.M[2][2] * linearDepth + ProjD3D.M[2][3] ) / ( linearDepth
+    // * ProjD3D.M[3][2] );
+    // OGL does this:
+    // depthBuffer = 0.5 + 0.5 * ( ProjOGL.M[2][2] * linearDepth + ProjOGL.M[2][3] ) / ( linearDepth
+    // * ProjOGL.M[3][2] );
+
+    // Therefore:
+    // ProjD3D.M[2][2] = 0.5 * ( ProjOGL.M[2][2] + ProjOGL.M[3][2] );
+    // ProjD3D.M[2][3] = 0.5 * ProjOGL.M[2][3];
+    // ProjD3D.M[3][2] = ProjOGL.M[3][2];
+
+    res.Projection22 = 0.5f * (Projection.M[2][2] + Projection.M[3][2]);
+    res.Projection23 = 0.5f * Projection.M[2][3];
+    res.Projection32 = Projection.M[3][2];
+  }
+  return res;
+}
+
+OVR_PUBLIC_FUNCTION(ovrMatrix4f)
+ovrMatrix4f_OrthoSubProjection(
+    ovrMatrix4f projection,
+    ovrVector2f orthoScale,
+    float orthoDistance,
+    float hmdToEyeOffsetX) {
+  ovrMatrix4f ortho;
+  // Negative sign is correct!
+  // If the eye is offset to the left, then the ortho view needs to be offset to the right relative
+  // to the camera.
+  float orthoHorizontalOffset = -hmdToEyeOffsetX / orthoDistance;
+
+  // Current projection maps real-world vector (x,y,1) to the RT.
+  // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
+  // the physical [-orthoHalfFov,orthoHalfFov]
+  // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
+  // we don't have to feed in Z=1 all the time.
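+  // (Editorial aside, illustrative numbers only: with a typical half-IPD of about 0.032 m
+  // and orthoDistance = 0.8 m, orthoHorizontalOffset works out to -0.032 / 0.8 = -0.04
+  // in tan-angle units.)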
+  // The horizontal offset math is a little hinky because the destination is
+  // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]
+  // So we need to first map [-FovPixels/2,FovPixels/2] to
+  // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
+  // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;
+  //    = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
+  // But then we need the same mapping as the existing projection matrix, i.e.
+  // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];
+  //    = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] +
+  // Projection.M[0][2]; = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +
+  // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
+  // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels
+  // and offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].
+
+  ortho.M[0][0] = projection.M[0][0] * orthoScale.x;
+  ortho.M[0][1] = 0.0f;
+  ortho.M[0][2] = 0.0f;
+  ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);
+
+  ortho.M[1][0] = 0.0f;
+  ortho.M[1][1] =
+      -projection.M[1][1] * orthoScale.y; /* Note sign flip (text rendering uses Y=down). */
+  ortho.M[1][2] = 0.0f;
+  ortho.M[1][3] = -projection.M[1][2];
+
+  ortho.M[2][0] = 0.0f;
+  ortho.M[2][1] = 0.0f;
+  ortho.M[2][2] = 0.0f;
+  ortho.M[2][3] = 0.0f;
+
+  /* No perspective correction for ortho. */
+  ortho.M[3][0] = 0.0f;
+  ortho.M[3][1] = 0.0f;
+  ortho.M[3][2] = 0.0f;
+  ortho.M[3][3] = 1.0f;
+
+  return ortho;
+}
+
+#undef ovr_CalcEyePoses
+OVR_PUBLIC_FUNCTION(void)
+ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]) {
+  if (!hmdToEyeOffset || !outEyePoses) {
+    return;
+  }
+
+  using OVR::Posef;
+  using OVR::Vector3f;
+
+  // Currently hmdToEyeOffset is only a 3D vector
+  outEyePoses[0] =
+      Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[0]));
+  outEyePoses[1] =
+      Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[1]));
+}
+
+OVR_PRIVATE_FUNCTION(void)
+ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef hmdToEyePose[2], ovrPosef outEyePoses[2]) {
+  if (!hmdToEyePose || !outEyePoses) {
+    return;
+  }
+
+  using OVR::Posef;
+  using OVR::Vector3f;
+
+  outEyePoses[0] = (Posef)headPose * (Posef)hmdToEyePose[0];
+  outEyePoses[1] = (Posef)headPose * (Posef)hmdToEyePose[1];
+}
+
+#undef ovr_GetEyePoses
+OVR_PUBLIC_FUNCTION(void)
+ovr_GetEyePoses(
+    ovrSession session,
+    long long frameIndex,
+    ovrBool latencyMarker,
+    const ovrVector3f hmdToEyeOffset[2],
+    ovrPosef outEyePoses[2],
+    double* outSensorSampleTime) {
+  double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+  ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
+  ovr_CalcEyePoses(trackingState.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);
+
+  if (outSensorSampleTime != nullptr) {
+    *outSensorSampleTime = ovr_GetTimeInSeconds();
+  }
+}
+
+OVR_PRIVATE_FUNCTION(void)
+ovr_GetEyePoses2(
+    ovrSession session,
+    long long frameIndex,
+    ovrBool latencyMarker,
+    const ovrPosef hmdToEyePose[2],
+    ovrPosef outEyePoses[2],
+    double* outSensorSampleTime) {
+  double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+  ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
+  ovr_CalcEyePoses2(trackingState.HeadPose.ThePose, hmdToEyePose, outEyePoses);
+
+  if (outSensorSampleTime != nullptr) {
+    *outSensorSampleTime = ovr_GetTimeInSeconds();
+  }
+}
+
+OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds) {
+  // Initially we assume everything is not running.
+  ovrDetectResult result;
+  result.IsOculusHMDConnected = ovrFalse;
+  result.IsOculusServiceRunning = ovrFalse;
+
+#if !defined(OSX_UNIMPLEMENTED)
+  // Attempt to open the named event.
+  HANDLE hServiceEvent = ::OpenEventW(SYNCHRONIZE, FALSE, OVR_HMD_CONNECTED_EVENT_NAME);
+
+  // If event exists,
+  if (hServiceEvent != nullptr) {
+    // This indicates that the Oculus Runtime is installed and running.
+    result.IsOculusServiceRunning = ovrTrue;
+
+    // Poll for event state.
+    DWORD objectResult = ::WaitForSingleObject(hServiceEvent, timeoutMilliseconds);
+
+    // If the event is signaled,
+    if (objectResult == WAIT_OBJECT_0) {
+      // This indicates that the Oculus HMD is connected.
+      result.IsOculusHMDConnected = ovrTrue;
+    }
+
+    ::CloseHandle(hServiceEvent);
+  }
+#else
+  (void)timeoutMilliseconds;
+  fprintf(stderr, __FILE__ "::[%s] Not implemented. Assuming single-process.\n", __func__);
+  result.IsOculusServiceRunning = ovrTrue;
+  result.IsOculusHMDConnected = ovrTrue;
+#endif // OSX_UNIMPLEMENTED
+
+
+  return result;
+}
+
+OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose) {
+  outPose->Orientation.x = -inPose->Orientation.x;
+  outPose->Orientation.y = inPose->Orientation.y;
+  outPose->Orientation.z = inPose->Orientation.z;
+  outPose->Orientation.w = -inPose->Orientation.w;
+
+  outPose->Position.x = -inPose->Position.x;
+  outPose->Position.y = inPose->Position.y;
+  outPose->Position.z = inPose->Position.z;
+}
+
+static float wavPcmBytesToFloat(const void* data, int32_t sizeInBits, bool swapBytes) {
+  // TODO Support big endian
+  (void)swapBytes;
+
+  // There's not a strong standard to convert 8/16/32b PCM to float.
+  // For 16b: MSDN says range is [-32760, 32760], Python SciPy uses [-32767, 32767] and Audacity
+  // outputs the full range [-32768, 32767].
+  // We use the same range on both sides and clamp to [-1, 1].
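+  // Worked example (added editorially, using the divisors below): a 16-bit sample of 16384
+  // maps to 16384 / 32767 ~= 0.50004, while the most negative sample -32768 maps to
+  // -32768 / 32767 ~= -1.00003, which is why the result is clamped back to -1.0f below.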
+
+  float result = 0.0f;
+  if (sizeInBits == 8)
+    // uint8_t is a special case, unsigned where 128 is zero
+    result = (*((uint8_t*)data) / (float)UCHAR_MAX) * 2.0f - 1.0f;
+  else if (sizeInBits == 16)
+    result = *((int16_t*)data) / (float)SHRT_MAX;
+  // else if (sizeInBits == 24) {
+  //  int value = data[0] | data[1] << 8 | data[2] << 16; // Need consider 2's complement
+  //  return value / 8388607.0f;
+  //}
+  else if (sizeInBits == 32)
+    result = *((int32_t*)data) / (float)INT_MAX;
+
+  return std::max(-1.0f, result);
+}
+
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_GenHapticsFromAudioData(
+    ovrHapticsClip* outHapticsClip,
+    const ovrAudioChannelData* audioChannel,
+    ovrHapticsGenMode genMode) {
+  if (!outHapticsClip || !audioChannel || genMode != ovrHapticsGenMode_PointSample)
+    return ovrError_InvalidParameter;
+  // Validate audio channel
+  if (audioChannel->Frequency <= 0 || audioChannel->SamplesCount <= 0 ||
+      audioChannel->Samples == nullptr)
+    return ovrError_InvalidParameter;
+
+  const int32_t kHapticsFrequency = 320;
+  const int32_t kHapticsMaxAmplitude = 255;
+  float samplesPerStep = audioChannel->Frequency / (float)kHapticsFrequency;
+  int32_t hapticsSampleCount = (int32_t)ceil(audioChannel->SamplesCount / samplesPerStep);
+
+  uint8_t* hapticsSamples = new uint8_t[hapticsSampleCount];
+  for (int32_t i = 0; i < hapticsSampleCount; ++i) {
+    float sample = audioChannel->Samples[(int32_t)(i * samplesPerStep)];
+    uint8_t hapticSample =
+        (uint8_t)std::min(UCHAR_MAX, (int)round(fabs(sample) * kHapticsMaxAmplitude));
+    hapticsSamples[i] = hapticSample;
+  }
+
+  outHapticsClip->Samples = hapticsSamples;
+  outHapticsClip->SamplesCount = hapticsSampleCount;
+
+  return ovrSuccess;
+}
+
+OVR_PUBLIC_FUNCTION(ovrResult)
+ovr_ReadWavFromBuffer(
+    ovrAudioChannelData* outAudioChannel,
+    const void* inputData,
+    int dataSizeInBytes,
+    int stereoChannelToUse) {
+  // We don't support any format other than PCM and IEEE Float
+  enum WavFormats {
+    kWavFormatUnknown = 0x0000,
+    kWavFormatLPCM = 0x0001,
+    kWavFormatFloatIEEE = 0x0003,
+    kWavFormatExtensible = 0xFFFE
+  };
+
+  struct WavHeader {
+    char RiffId[4]; // "RIFF" = little-endian, "RIFX" = big-endian
+    int32_t Size; // 4 + (8 + FmtChunkSize) + (8 + DataChunkSize)
+    char WavId[4]; // Must be "WAVE"
+
+    char FmtChunckId[4]; // Must be "fmt "
+    uint32_t FmtChunkSize; // Remaining size of this chunk (16B)
+    uint16_t Format; // WavFormats: PCM or Float supported
+    uint16_t Channels; // 1 = Mono, 2 = Stereo
+    uint32_t SampleRate; // e.g. 44100
+    uint32_t BytesPerSec; // SampleRate * BytesPerBlock
+    uint16_t BytesPerBlock; // (NumChannels * BitsPerSample/8)
+    uint16_t BitsPerSample; // 8, 16, 32
+
+    char DataChunckId[4]; // Must be "data"
+    uint32_t DataChunkSize; // Remaining size of this chunk
+  };
+
+  const int32_t kMinWavFileSize = sizeof(WavHeader) + 1;
+  if (!outAudioChannel || !inputData || dataSizeInBytes < kMinWavFileSize)
+    return ovrError_InvalidParameter;
+
+  WavHeader* header = (WavHeader*)inputData;
+  uint8_t* data = (uint8_t*)inputData + sizeof(WavHeader);
+
+  // Validate
+  const char* wavId = header->RiffId;
+  // TODO We need to support RIFX when supporting big endian formats
+  // bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && (wavId[3] == 'F' ||
+  // wavId[3] == 'X')) &&
+  bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && wavId[3] == 'F') &&
+      memcmp(header->WavId, "WAVE", 4) == 0;
+  bool hasValidChunks =
+      memcmp(header->FmtChunckId, "fmt ", 4) == 0 && memcmp(header->DataChunckId, "data ", 4) == 0;
+  if (!isValidWav || !hasValidChunks) {
+    return ovrError_InvalidOperation;
+  }
+
+  // We only support PCM
+  bool isSupported = (header->Format == kWavFormatLPCM || header->Format == kWavFormatFloatIEEE) &&
+      (header->Channels == 1 || header->Channels == 2) &&
+      (header->BitsPerSample == 8 || header->BitsPerSample == 16 || header->BitsPerSample == 32);
+  if (!isSupported) {
+    return ovrError_Unsupported;
+  }
+
+  // Channel selection
+  bool useSecondChannel = (header->Channels == 2 && stereoChannelToUse == 1);
+  int32_t channelOffset = (useSecondChannel) ? header->BytesPerBlock / 2 : 0;
+
+  // TODO Support big-endian
+  int32_t blockCount = header->DataChunkSize / header->BytesPerBlock;
+  float* samples = new float[blockCount];
+
+  for (int32_t i = 0; i < blockCount; i++) {
+    int32_t dataIndex = i * header->BytesPerBlock;
+    uint8_t* dataPtr = &data[dataIndex + channelOffset];
+    float sample = (header->Format == kWavFormatLPCM)
+        ? wavPcmBytesToFloat(dataPtr, header->BitsPerSample, false)
+        : *(float*)dataPtr;
+
+    samples[i] = sample;
+  }
+
+  // Output
+  outAudioChannel->Samples = samples;
+  outAudioChannel->SamplesCount = blockCount;
+  outAudioChannel->Frequency = header->SampleRate;
+
+  return ovrSuccess;
+}
+
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel) {
+  if (audioChannel != nullptr && audioChannel->Samples != nullptr) {
+    delete[] audioChannel->Samples;
+    memset(audioChannel, 0, sizeof(ovrAudioChannelData));
+  }
+}
+
+OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip) {
+  if (hapticsClip != nullptr && hapticsClip->Samples != nullptr) {
+    delete[](uint8_t*) hapticsClip->Samples;
+    memset(hapticsClip, 0, sizeof(ovrHapticsClip));
+  }
+}
diff --git a/Shim/OVR_CAPI_Util.o b/Shim/OVR_CAPI_Util.o
new file mode 100644
index 0000000..3a1b4ba
Binary files /dev/null and b/Shim/OVR_CAPI_Util.o differ
diff --git a/Shim/OVR_StereoProjection.cpp b/Shim/OVR_StereoProjection.cpp
new file mode 100755
index 0000000..8465e19
--- /dev/null
+++ b/Shim/OVR_StereoProjection.cpp
@@ -0,0 +1,218 @@
+/************************************************************************************
+
+Filename : OVR_StereoProjection.cpp
+Content : Stereo rendering functions
+Created : November 30, 2013
+Authors : Tom Fosyth
+
+Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.3
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#include <Extras/OVR_StereoProjection.h> // (header name lost in extraction; reconstructed from usage)
+
+namespace OVR {
+
+ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort tanHalfFov) {
+  float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan);
+  float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f;
+  float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan);
+  float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f;
+
+  ScaleAndOffset2D result;
+  result.Scale = Vector2f(projXScale, projYScale);
+  result.Offset = Vector2f(projXOffset, projYOffset);
+  // Hey - why is that Y.Offset negated?
+  // It's because a projection matrix transforms from world coords with Y=up,
+  // whereas this is from NDC which is Y=down.
+
+  return result;
+}
+
+Matrix4f CreateProjection(
+    bool leftHanded,
+    bool isOpenGL,
+    FovPort tanHalfFov,
+    StereoEye /*eye*/,
+    float zNear /*= 0.01f*/,
+    float zFar /*= 10000.0f*/,
+    bool flipZ /*= false*/,
+    bool farAtInfinity /*= false*/) {
+  if (!flipZ && farAtInfinity) {
+    // OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");
+    // Assertion disabled because this code no longer has access to LibOVRKernel assertion
+    // functionality.
+    farAtInfinity = false;
+  }
+
+  // A projection matrix is very like a scaling from NDC, so we can start with that.
+  ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov(tanHalfFov);
+
+  float handednessScale = leftHanded ? 1.0f : -1.0f;
+
+  Matrix4f projection;
+  // Produces X result, mapping clip edges to [-w,+w]
+  projection.M[0][0] = scaleAndOffset.Scale.x;
+  projection.M[0][1] = 0.0f;
+  projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x;
+  projection.M[0][3] = 0.0f;
+
+  // Produces Y result, mapping clip edges to [-w,+w]
+  // Hey - why is that YOffset negated?
+  // It's because a projection matrix transforms from world coords with Y=up,
+  // whereas this is derived from an NDC scaling, which is Y=down.
+  projection.M[1][0] = 0.0f;
+  projection.M[1][1] = scaleAndOffset.Scale.y;
+  projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y;
+  projection.M[1][3] = 0.0f;
+
+  // Produces Z-buffer result - app needs to fill this in with whatever Z range it wants.
+  // We'll just use some defaults for now.
+  projection.M[2][0] = 0.0f;
+  projection.M[2][1] = 0.0f;
+
+  if (farAtInfinity) {
+    if (isOpenGL) {
+      // It's not clear this makes sense for OpenGL - you don't get the same precision benefits you
+      // do in D3D.
+      projection.M[2][2] = -handednessScale;
+      projection.M[2][3] = 2.0f * zNear;
+    } else {
+      projection.M[2][2] = 0.0f;
+      projection.M[2][3] = zNear;
+    }
+  } else {
+    if (isOpenGL) {
+      // Clip range is [-w,+w], so 0 is at the middle of the range.
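+      // (Editorial note.) For the common right-handed, non-flipped case
+      // (handednessScale = -1, flipZ = false) the two terms below reduce to
+      // -(zFar + zNear) / (zFar - zNear) and -2 * zFar * zNear / (zFar - zNear),
+      // i.e. the familiar OpenGL glFrustum depth row.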
+      projection.M[2][2] =
+          -handednessScale * (flipZ ? -1.0f : 1.0f) * (zNear + zFar) / (zNear - zFar);
+      projection.M[2][3] = 2.0f * ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
+    } else {
+      // Clip range is [0,+w], so 0 is at the start of the range.
+      projection.M[2][2] = -handednessScale * (flipZ ? -zNear : zFar) / (zNear - zFar);
+      projection.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
+    }
+  }
+
+  // Produces W result (= Z in)
+  projection.M[3][0] = 0.0f;
+  projection.M[3][1] = 0.0f;
+  projection.M[3][2] = handednessScale;
+  projection.M[3][3] = 0.0f;
+
+  return projection;
+}
+
+Matrix4f CreateOrthoSubProjection(
+    bool /*rightHanded*/,
+    StereoEye eyeType,
+    float tanHalfFovX,
+    float tanHalfFovY,
+    float unitsX,
+    float unitsY,
+    float distanceFromCamera,
+    float interpupillaryDistance,
+    Matrix4f const& projection,
+    float zNear /*= 0.0f*/,
+    float zFar /*= 0.0f*/,
+    bool flipZ /*= false*/,
+    bool farAtInfinity /*= false*/) {
+  if (!flipZ && farAtInfinity) {
+    // OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");
+    // Assertion disabled because this code no longer has access to LibOVRKernel assertion
+    // functionality.
+    farAtInfinity = false;
+  }
+
+  float orthoHorizontalOffset = interpupillaryDistance * 0.5f / distanceFromCamera;
+  switch (eyeType) {
+    case StereoEye_Left:
+      break;
+    case StereoEye_Right:
+      orthoHorizontalOffset = -orthoHorizontalOffset;
+      break;
+    case StereoEye_Center:
+      orthoHorizontalOffset = 0.0f;
+      break;
+    default:
+      break;
+  }
+
+  // Current projection maps real-world vector (x,y,1) to the RT.
+  // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
+  // the physical [-orthoHalfFov,orthoHalfFov]
+  // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
+  // we don't have to feed in Z=1 all the time.
+  // The horizontal offset math is a little hinky because the destination is
+  // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]
+  // So we need to first map [-FovPixels/2,FovPixels/2] to
+  // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
+  // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;
+  //    = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
+  // But then we need the same mapping as the existing projection matrix, i.e.
+  // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];
+  //    = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] +
+  // Projection.M[0][2];
+  //    = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +
+  // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
+  // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels
+  // and
+  // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].
+
+  float orthoScaleX = 2.0f * tanHalfFovX / unitsX;
+  float orthoScaleY = 2.0f * tanHalfFovY / unitsY;
+  Matrix4f ortho;
+  ortho.M[0][0] = projection.M[0][0] * orthoScaleX;
+  ortho.M[0][1] = 0.0f;
+  ortho.M[0][2] = 0.0f;
+  ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);
+
+  ortho.M[1][0] = 0.0f;
+  ortho.M[1][1] = -projection.M[1][1] * orthoScaleY; // Note sign flip (text rendering uses Y=down).
+  ortho.M[1][2] = 0.0f;
+  ortho.M[1][3] = -projection.M[1][2];
+
+  const float zDiff = zNear - zFar;
+  if (fabsf(zDiff) < 0.001f) {
+    ortho.M[2][0] = 0.0f;
+    ortho.M[2][1] = 0.0f;
+    ortho.M[2][2] = 0.0f;
+    ortho.M[2][3] = flipZ ? zNear : zFar;
+  } else {
+    ortho.M[2][0] = 0.0f;
+    ortho.M[2][1] = 0.0f;
+
+    if (farAtInfinity) {
+      ortho.M[2][2] = 0.0f;
+      ortho.M[2][3] = zNear;
+    } else if (zDiff != 0.0f) {
+      ortho.M[2][2] = (flipZ ? zNear : zFar) / zDiff;
+      ortho.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / zDiff;
+    }
+  }
+
+  // No perspective correction for ortho.
+  ortho.M[3][0] = 0.0f;
+  ortho.M[3][1] = 0.0f;
+  ortho.M[3][2] = 0.0f;
+  ortho.M[3][3] = 1.0f;
+
+  return ortho;
+}
+
+} // namespace OVR
diff --git a/Shim/OVR_StereoProjection.o b/Shim/OVR_StereoProjection.o
new file mode 100644
index 0000000..d89cebe
Binary files /dev/null and b/Shim/OVR_StereoProjection.o differ
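
For reference, a minimal sketch of how an application might exercise the utility entry points added by this patch (illustrative only, not part of the commit; it assumes the repository's Include/ directory is on the compiler's include path, and wavBytes stands in for a WAV file loaded by the application):

#include <cstdio>
#include <vector>
#include <Extras/OVR_CAPI_Util.h>

int main() {
  // Poll once (0 ms timeout) for the Oculus service and HMD without loading LibOVRRT.
  ovrDetectResult detect = ovr_Detect(0);
  if (!detect.IsOculusServiceRunning || !detect.IsOculusHMDConnected) {
    std::printf("No Oculus runtime or HMD detected.\n");
    return 0;
  }

  // Convert an in-memory WAV file into a Touch haptics clip.
  std::vector<unsigned char> wavBytes; // assumed to be filled with a complete little-endian WAV file
  ovrAudioChannelData channel = {};
  if (OVR_SUCCESS(ovr_ReadWavFromBuffer(&channel, wavBytes.data(), (int)wavBytes.size(), 0))) {
    ovrHapticsClip clip = {};
    if (OVR_SUCCESS(ovr_GenHapticsFromAudioData(&clip, &channel, ovrHapticsGenMode_PointSample))) {
      // clip.Samples now holds 8-bit amplitudes resampled to the 320 Hz haptics rate.
      ovr_ReleaseHapticsClip(&clip);
    }
    ovr_ReleaseAudioChannelData(&channel);
  }
  return 0;
}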