/************************************************************************************

PublicHeader: OVR_CAPI_Util.c
Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.

Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

You may obtain a copy of the License at

http://www.oculusvr.com/licenses/LICENSE-3.3

Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*************************************************************************************/

#include <Extras/OVR_CAPI_Util.h>
#include <Extras/OVR_StereoProjection.h>

#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#if !defined(_WIN32)
// ovr_Detect below relies on a named Windows event; mark it unimplemented elsewhere.
#define OSX_UNIMPLEMENTED 1
#endif

#if defined(_MSC_VER) && _MSC_VER < 1800 // MSVC < 2013
#define round(dbl) \
  (dbl) >= 0.0 ? (int)((dbl) + 0.5) \
               : (((dbl) - (double)(int)(dbl)) <= -0.5 ? (int)(dbl) : (int)((dbl)-0.5))
#endif

#if defined(_MSC_VER)
#include <emmintrin.h>
#pragma intrinsic(_mm_pause)
#endif

#if defined(_WIN32)
// Prevents <Windows.h> from defining min() and max() macro symbols.
#define NOMINMAX
#include <windows.h>
#endif

// Used to generate projection from ovrEyeDesc::Fov
OVR_PUBLIC_FUNCTION(ovrMatrix4f)
ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags) {
  bool leftHanded = (projectionModFlags & ovrProjection_LeftHanded) > 0;
  bool flipZ = (projectionModFlags & ovrProjection_FarLessThanNear) > 0;
  bool farAtInfinity = (projectionModFlags & ovrProjection_FarClipAtInfinity) > 0;
  bool isOpenGL = (projectionModFlags & ovrProjection_ClipRangeOpenGL) > 0;

  // TODO: Pass in correct eye to CreateProjection if we want to support canted displays from CAPI
  return OVR::CreateProjection(
      leftHanded, isOpenGL, fov, OVR::StereoEye_Center, znear, zfar, flipZ, farAtInfinity);
}
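
// Usage sketch (illustrative, not part of the original SDK source; compiled out):
// building an OpenGL-style projection for one eye from its field of view.
#if 0
static ovrMatrix4f Example_MakeGlProjection(ovrFovPort eyeFov) {
  // Near/far planes here are arbitrary example values, in meters.
  return ovrMatrix4f_Projection(eyeFov, 0.1f, 1000.0f, ovrProjection_ClipRangeOpenGL);
}
#endif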

OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc)
ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f Projection, unsigned int projectionModFlags) {
  ovrTimewarpProjectionDesc res;
  res.Projection22 = Projection.M[2][2];
  res.Projection23 = Projection.M[2][3];
  res.Projection32 = Projection.M[3][2];

  if ((res.Projection32 != 1.0f) && (res.Projection32 != -1.0f)) {
    // This is a very strange projection matrix, and probably won't work.
    // If you need it to work, please contact Oculus and let us know your usage scenario.
  }

  if ((projectionModFlags & ovrProjection_ClipRangeOpenGL) != 0) {
    // Internally we use the D3D range of [0,+w], not the OGL one of [-w,+w], so we need to convert
    // one to the other. Note that the values in the depth buffer, and the actual linear depth we
    // want, are the same for both APIs; the difference is purely in the values inside the
    // projection matrix.

    // D3D does this:
    // depthBuffer = ( ProjD3D.M[2][2] * linearDepth + ProjD3D.M[2][3] ) / ( linearDepth
    //               * ProjD3D.M[3][2] );
    // OGL does this:
    // depthBuffer = 0.5 + 0.5 * ( ProjOGL.M[2][2] * linearDepth + ProjOGL.M[2][3] ) / ( linearDepth
    //               * ProjOGL.M[3][2] );

    // Therefore:
    // ProjD3D.M[2][2] = 0.5 * ( ProjOGL.M[2][2] + ProjOGL.M[3][2] );
    // ProjD3D.M[2][3] = 0.5 * ProjOGL.M[2][3];
    // ProjD3D.M[3][2] = ProjOGL.M[3][2];

    res.Projection22 = 0.5f * (Projection.M[2][2] + Projection.M[3][2]);
    res.Projection23 = 0.5f * Projection.M[2][3];
    res.Projection32 = Projection.M[3][2];
  }

  return res;
}
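
// Sanity-check sketch for the conversion above (illustrative, compiled out): an
// OpenGL-range projection comes back with M[2][2], M[2][3], M[3][2] rewritten for the
// internal D3D-style [0,+w] range.
#if 0
static ovrTimewarpProjectionDesc Example_TimewarpDescFromGlProjection(ovrFovPort eyeFov) {
  ovrMatrix4f glProj =
      ovrMatrix4f_Projection(eyeFov, 0.1f, 1000.0f, ovrProjection_ClipRangeOpenGL);
  return ovrTimewarpProjectionDesc_FromProjection(glProj, ovrProjection_ClipRangeOpenGL);
}
#endif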

OVR_PUBLIC_FUNCTION(ovrMatrix4f)
ovrMatrix4f_OrthoSubProjection(
    ovrMatrix4f projection,
    ovrVector2f orthoScale,
    float orthoDistance,
    float hmdToEyeOffsetX) {
  ovrMatrix4f ortho;

  // Negative sign is correct!
  // If the eye is offset to the left, then the ortho view needs to be offset to the right
  // relative to it.
  float orthoHorizontalOffset = -hmdToEyeOffsetX / orthoDistance;

  // Current projection maps real-world vector (x,y,1) to the RT.
  // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
  // the physical [-orthoHalfFov,orthoHalfFov].
  // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
  // we don't have to feed in Z=1 all the time.
  // The horizontal offset math is a little hinky because the destination is
  // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset].
  // So we need to first map [-FovPixels/2,FovPixels/2] to
  // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
  // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;
  //    = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
  // But then we need the same mapping as the existing projection matrix, i.e.
  // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];
  //    = (x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] +
  //      Projection.M[0][2];
  //    = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +
  //      orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
  // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels
  // and offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].

  ortho.M[0][0] = projection.M[0][0] * orthoScale.x;
  ortho.M[0][1] = 0.0f;
  ortho.M[0][2] = 0.0f;
  ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);

  ortho.M[1][0] = 0.0f;
  ortho.M[1][1] =
      -projection.M[1][1] * orthoScale.y; /* Note sign flip (text rendering uses Y=down). */
  ortho.M[1][2] = 0.0f;
  ortho.M[1][3] = -projection.M[1][2];

  ortho.M[2][0] = 0.0f;
  ortho.M[2][1] = 0.0f;
  ortho.M[2][2] = 0.0f;
  ortho.M[2][3] = 0.0f;

  /* No perspective correction for ortho. */
  ortho.M[3][0] = 0.0f;
  ortho.M[3][1] = 0.0f;
  ortho.M[3][2] = 0.0f;
  ortho.M[3][3] = 1.0f;

  return ortho;
}
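
// Usage sketch (illustrative, compiled out): an orthographic sub-projection for HUD or
// text rendering, assuming the caller has the eye projection, the eye's
// pixels-per-tan-angle scale, and the horizontal HMD-to-eye offset.
#if 0
static ovrMatrix4f Example_MakeHudProjection(
    ovrMatrix4f eyeProj,
    ovrVector2f pixelsPerTanAngleAtCenter,
    float hmdToEyeOffsetX) {
  ovrVector2f orthoScale = {
      1.0f / pixelsPerTanAngleAtCenter.x, 1.0f / pixelsPerTanAngleAtCenter.y};
  return ovrMatrix4f_OrthoSubProjection(
      eyeProj, orthoScale, 0.8f /* HUD distance in meters */, hmdToEyeOffsetX);
}
#endif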

#undef ovr_CalcEyePoses
OVR_PUBLIC_FUNCTION(void)
ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]) {
  if (!hmdToEyeOffset || !outEyePoses) {
    return;
  }

  using OVR::Posef;
  using OVR::Vector3f;

  // Currently hmdToEyeOffset is only a 3D vector
  outEyePoses[0] =
      Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[0]));
  outEyePoses[1] =
      Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[1]));
}

OVR_PRIVATE_FUNCTION(void)
ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef hmdToEyePose[2], ovrPosef outEyePoses[2]) {
  if (!hmdToEyePose || !outEyePoses) {
    return;
  }

  using OVR::Posef;

  outEyePoses[0] = (Posef)headPose * (Posef)hmdToEyePose[0];
  outEyePoses[1] = (Posef)headPose * (Posef)hmdToEyePose[1];
}

#undef ovr_GetEyePoses
OVR_PUBLIC_FUNCTION(void)
ovr_GetEyePoses(
    ovrSession session,
    long long frameIndex,
    ovrBool latencyMarker,
    const ovrVector3f hmdToEyeOffset[2],
    ovrPosef outEyePoses[2],
    double* outSensorSampleTime) {
  double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
  ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
  ovr_CalcEyePoses(trackingState.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);

  if (outSensorSampleTime != nullptr) {
    *outSensorSampleTime = ovr_GetTimeInSeconds();
  }
}
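
// Usage sketch (illustrative, compiled out): the per-frame pattern for fetching
// predicted eye poses before rendering and reporting the sensor sample time back to
// the compositor for latency tracking.
#if 0
static void Example_QueryEyePoses(
    ovrSession session,
    long long frameIndex,
    const ovrVector3f hmdToEyeOffset[2]) {
  ovrPosef eyePoses[2];
  double sensorSampleTime = 0.0;
  ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyePoses, &sensorSampleTime);
  // Render each eye from eyePoses[eye]; pass sensorSampleTime in the submitted layer.
}
#endif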

OVR_PRIVATE_FUNCTION(void)
ovr_GetEyePoses2(
    ovrSession session,
    long long frameIndex,
    ovrBool latencyMarker,
    const ovrPosef hmdToEyePose[2],
    ovrPosef outEyePoses[2],
    double* outSensorSampleTime) {
  double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
  ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
  ovr_CalcEyePoses2(trackingState.HeadPose.ThePose, hmdToEyePose, outEyePoses);

  if (outSensorSampleTime != nullptr) {
    *outSensorSampleTime = ovr_GetTimeInSeconds();
  }
}

OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds) {
  // Initially we assume everything is not running.
  ovrDetectResult result;
  result.IsOculusHMDConnected = ovrFalse;
  result.IsOculusServiceRunning = ovrFalse;

#if !defined(OSX_UNIMPLEMENTED)
  // Attempt to open the named event.
  HANDLE hServiceEvent = ::OpenEventW(SYNCHRONIZE, FALSE, OVR_HMD_CONNECTED_EVENT_NAME);

  // If the event exists,
  if (hServiceEvent != nullptr) {
    // This indicates that the Oculus Runtime is installed and running.
    result.IsOculusServiceRunning = ovrTrue;

    // Poll for event state.
    DWORD objectResult = ::WaitForSingleObject(hServiceEvent, timeoutMilliseconds);

    // If the event is signaled,
    if (objectResult == WAIT_OBJECT_0) {
      // This indicates that the Oculus HMD is connected.
      result.IsOculusHMDConnected = ovrTrue;
    }

    ::CloseHandle(hServiceEvent);
  }
#else
  (void)timeoutMilliseconds;
  fprintf(stderr, __FILE__ "::[%s] Not implemented. Assuming single-process.\n", __func__);
  result.IsOculusServiceRunning = ovrTrue;
  result.IsOculusHMDConnected = ovrTrue;
#endif // OSX_UNIMPLEMENTED

  return result;
}
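
// Usage sketch (illustrative, compiled out): probing for the runtime without blocking,
// e.g. from a launcher deciding whether to start in VR mode.
#if 0
static bool Example_ShouldStartInVR() {
  ovrDetectResult detect = ovr_Detect(0 /* don't wait for the connected event */);
  return detect.IsOculusServiceRunning && detect.IsOculusHMDConnected;
}
#endif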

OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose) {
  outPose->Orientation.x = -inPose->Orientation.x;
  outPose->Orientation.y = inPose->Orientation.y;
  outPose->Orientation.z = inPose->Orientation.z;
  outPose->Orientation.w = -inPose->Orientation.w;

  outPose->Position.x = -inPose->Position.x;
  outPose->Position.y = inPose->Position.y;
  outPose->Position.z = inPose->Position.z;
}

static float wavPcmBytesToFloat(const void* data, int32_t sizeInBits, bool swapBytes) {
  // TODO Support big endian
  (void)swapBytes; // unused until big-endian support is added

  // There's no strong standard for converting 8/16/32-bit PCM to float.
  // For 16-bit: MSDN says the range is [-32760, 32760], Python SciPy uses [-32767, 32767], and
  // Audacity outputs the full range [-32768, 32767].
  // We use the same range on both sides and clamp to [-1, 1].

  float result = 0.0f;
  if (sizeInBits == 8)
    // uint8_t is a special case: unsigned, where 128 is zero
    result = (*((uint8_t*)data) / (float)UCHAR_MAX) * 2.0f - 1.0f;
  else if (sizeInBits == 16)
    result = *((int16_t*)data) / (float)SHRT_MAX;
  // else if (sizeInBits == 24) {
  //   int value = data[0] | data[1] << 8 | data[2] << 16; // Needs 2's-complement handling
  //   return value / 8388607.0f;
  // }
  else if (sizeInBits == 32)
    result = *((int32_t*)data) / (float)INT_MAX;

  // Only the negative side can overshoot (e.g. -32768/32767), so clamping from below suffices.
  return std::max(-1.0f, result);
}
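
// Worked values for the mapping above (illustrative, compiled out):
#if 0
static void Example_PcmToFloatValues() {
  int16_t s = -32768; // most negative 16-bit PCM sample
  float f = wavPcmBytesToFloat(&s, 16, false); // -32768/32767 ~= -1.00003, clamped to -1.0f
  uint8_t u = 255; // largest 8-bit PCM sample
  float g = wavPcmBytesToFloat(&u, 8, false); // (255/255)*2 - 1 = 1.0f
  (void)f;
  (void)g;
}
#endif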

OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GenHapticsFromAudioData(
    ovrHapticsClip* outHapticsClip,
    const ovrAudioChannelData* audioChannel,
    ovrHapticsGenMode genMode) {
  if (!outHapticsClip || !audioChannel || genMode != ovrHapticsGenMode_PointSample)
    return ovrError_InvalidParameter;
  // Validate audio channel
  if (audioChannel->Frequency <= 0 || audioChannel->SamplesCount <= 0 ||
      audioChannel->Samples == nullptr)
    return ovrError_InvalidParameter;

  const int32_t kHapticsFrequency = 320;
  const int32_t kHapticsMaxAmplitude = 255;
  float samplesPerStep = audioChannel->Frequency / (float)kHapticsFrequency;
  int32_t hapticsSampleCount = (int32_t)ceil(audioChannel->SamplesCount / samplesPerStep);

  uint8_t* hapticsSamples = new uint8_t[hapticsSampleCount];
  for (int32_t i = 0; i < hapticsSampleCount; ++i) {
    float sample = audioChannel->Samples[(int32_t)(i * samplesPerStep)];
    uint8_t hapticSample =
        (uint8_t)std::min(UCHAR_MAX, (int)round(fabs(sample) * kHapticsMaxAmplitude));
    hapticsSamples[i] = hapticSample;
  }

  outHapticsClip->Samples = hapticsSamples;
  outHapticsClip->SamplesCount = hapticsSampleCount;

  return ovrSuccess;
}
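
// Usage sketch (illustrative, compiled out): downsampling a decoded audio channel to
// the 320 Hz, 8-bit amplitude stream consumed by the haptics engine.
#if 0
static ovrResult Example_MakeHapticsClip(
    const ovrAudioChannelData* audioChannel,
    ovrHapticsClip* outClip) {
  // On success, free outClip with ovr_ReleaseHapticsClip when done.
  return ovr_GenHapticsFromAudioData(outClip, audioChannel, ovrHapticsGenMode_PointSample);
}
#endif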

OVR_PUBLIC_FUNCTION(ovrResult)
ovr_ReadWavFromBuffer(
    ovrAudioChannelData* outAudioChannel,
    const void* inputData,
    int dataSizeInBytes,
    int stereoChannelToUse) {
  // We don't support any format other than PCM and IEEE Float
  enum WavFormats {
    kWavFormatUnknown = 0x0000,
    kWavFormatLPCM = 0x0001,
    kWavFormatFloatIEEE = 0x0003,
    kWavFormatExtensible = 0xFFFE
  };

  struct WavHeader {
    char RiffId[4]; // "RIFF" = little-endian, "RIFX" = big-endian
    int32_t Size; // 4 + (8 + FmtChunkSize) + (8 + DataChunkSize)
    char WavId[4]; // Must be "WAVE"

    char FmtChunckId[4]; // Must be "fmt "
    uint32_t FmtChunkSize; // Remaining size of this chunk (16B)
    uint16_t Format; // WavFormats: PCM or Float supported
    uint16_t Channels; // 1 = Mono, 2 = Stereo
    uint32_t SampleRate; // e.g. 44100
    uint32_t BytesPerSec; // SampleRate * BytesPerBlock
    uint16_t BytesPerBlock; // (NumChannels * BitsPerSample/8)
    uint16_t BitsPerSample; // 8, 16, 32

    char DataChunckId[4]; // Must be "data"
    uint32_t DataChunkSize; // Remaining size of this chunk
  };

  const int32_t kMinWavFileSize = sizeof(WavHeader) + 1;
  if (!outAudioChannel || !inputData || dataSizeInBytes < kMinWavFileSize)
    return ovrError_InvalidParameter;

  WavHeader* header = (WavHeader*)inputData;
  uint8_t* data = (uint8_t*)inputData + sizeof(WavHeader);

  // Validate
  const char* wavId = header->RiffId;
  // TODO We need to support RIFX when supporting big endian formats
  // bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && (wavId[3] == 'F' ||
  // wavId[3] == 'X')) &&
  bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && wavId[3] == 'F') &&
      memcmp(header->WavId, "WAVE", 4) == 0;
  bool hasValidChunks =
      memcmp(header->FmtChunckId, "fmt ", 4) == 0 && memcmp(header->DataChunckId, "data", 4) == 0;
  if (!isValidWav || !hasValidChunks) {
    return ovrError_InvalidOperation;
  }

  // We only support PCM and IEEE Float formats
  bool isSupported = (header->Format == kWavFormatLPCM || header->Format == kWavFormatFloatIEEE) &&
      (header->Channels == 1 || header->Channels == 2) &&
      (header->BitsPerSample == 8 || header->BitsPerSample == 16 || header->BitsPerSample == 32);
  if (!isSupported) {
    return ovrError_Unsupported;
  }

  // Channel selection
  bool useSecondChannel = (header->Channels == 2 && stereoChannelToUse == 1);
  int32_t channelOffset = (useSecondChannel) ? header->BytesPerBlock / 2 : 0;

  // TODO Support big-endian
  int32_t blockCount = header->DataChunkSize / header->BytesPerBlock;
  float* samples = new float[blockCount];

  for (int32_t i = 0; i < blockCount; i++) {
    int32_t dataIndex = i * header->BytesPerBlock;
    uint8_t* dataPtr = &data[dataIndex + channelOffset];
    float sample = (header->Format == kWavFormatLPCM)
        ? wavPcmBytesToFloat(dataPtr, header->BitsPerSample, false)
        : *(float*)dataPtr;

    samples[i] = sample;
  }

  outAudioChannel->Samples = samples;
  outAudioChannel->SamplesCount = blockCount;
  outAudioChannel->Frequency = header->SampleRate;

  return ovrSuccess;
}
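
// Usage sketch (illustrative, compiled out): end-to-end pipeline from an in-memory WAV
// file image to a haptics clip, releasing intermediate buffers along the way.
#if 0
static ovrResult Example_WavToHaptics(const void* wavBytes, int wavSize, ovrHapticsClip* outClip) {
  ovrAudioChannelData audioChannel = {};
  ovrResult result = ovr_ReadWavFromBuffer(&audioChannel, wavBytes, wavSize, 0 /* left */);
  if (OVR_SUCCESS(result)) {
    result = ovr_GenHapticsFromAudioData(outClip, &audioChannel, ovrHapticsGenMode_PointSample);
    ovr_ReleaseAudioChannelData(&audioChannel);
  }
  return result;
}
#endif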

OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel) {
  if (audioChannel != nullptr && audioChannel->Samples != nullptr) {
    delete[] audioChannel->Samples;
    memset(audioChannel, 0, sizeof(ovrAudioChannelData));
  }
}

OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip) {
  if (hapticsClip != nullptr && hapticsClip->Samples != nullptr) {
    delete[](uint8_t*) hapticsClip->Samples;
    memset(hapticsClip, 0, sizeof(ovrHapticsClip));
  }
}