Need help retrieving frame timings from OpenXR

Hi,

I want to retrieve frame timings from OpenXR using an OpenXRFeature in Unity. A friend of mine created a GitHub repository for this purpose, but the values we get are incorrect: they are always around 11 ms.
Here is the repository link: OpenXRNativePlugin/InterceptFeaturePlugin at master · pikanezi/OpenXRNativePlugin · GitHub

    #include "IUnityInterface.h"
    #include "IUnityLog.h"
    #include "openxr/openxr.h"
    #include <cstring>
    #include <string>
    #include <chrono>
    #include <format>
     
    typedef void (*SendOpenXrGpuTime_pfn)(double openXrGpuTime);
    typedef void (*SendOpenXrCpuTime_pfn)(double openXrCpuTime);
     
    static PFN_xrGetInstanceProcAddr s_xrGetInstanceProcAddr = nullptr;
    static PFN_xrBeginFrame s_xrBeginFrame = nullptr;
    static PFN_xrWaitFrame s_xrWaitFrame = nullptr;
     
    std::chrono::high_resolution_clock::time_point last_gpu_time;
    std::chrono::high_resolution_clock::time_point last_cpu_time;
     
    static IUnityLog* unityLogPtr = nullptr;
     
    static SendOpenXrGpuTime_pfn send_gpu_time = nullptr;
    static SendOpenXrCpuTime_pfn send_cpu_time = nullptr;
     
     
    static XrResult XRAPI_PTR intercepted_xrBeginFrame(XrSession session, const XrFrameBeginInfo* frameBeginInfo)
    {
        XrResult result = s_xrBeginFrame(session, frameBeginInfo);
        auto now = std::chrono::high_resolution_clock::now();
        auto gpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(now - last_gpu_time).count();
        last_gpu_time = now;
        send_gpu_time((double)gpu_time);
     
        return result;
    }
     
    static XrResult XRAPI_PTR intercepted_xrWaitFrame(XrSession session, const XrFrameWaitInfo* frameWaitInfo, XrFrameState* frameState)
    {
        XrResult result = s_xrWaitFrame(session, frameWaitInfo, frameState);
        auto now = std::chrono::high_resolution_clock::now();
        auto cpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(now - last_cpu_time).count();
        last_cpu_time = now;
        send_cpu_time((double)cpu_time);
     
        return result;
    }
     
    static XrResult XRAPI_PTR intercept_functions_xrGetInstanceProcAddr(XrInstance instance, const char* name, PFN_xrVoidFunction* function)
    {
        XrResult result = s_xrGetInstanceProcAddr(instance, name, function);
        auto str_name = std::string(name);
        if ("xrBeginFrame" == str_name)
        {
            s_xrBeginFrame = (PFN_xrBeginFrame)*function;
            *function = (PFN_xrVoidFunction)intercepted_xrBeginFrame;
        }
        if ("xrWaitFrame" == str_name)
        {
            s_xrWaitFrame = (PFN_xrWaitFrame)*function;
            *function = (PFN_xrVoidFunction)intercepted_xrWaitFrame;
        }
        return result;
    }
     
    extern "C"
    {
        void UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API script_set_gpu_time_callback(SendOpenXrGpuTime_pfn callback)
        {
            send_gpu_time = callback;
        }
     
        void UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API script_set_cpu_time_callback(SendOpenXrCpuTime_pfn callback)
        {
            send_cpu_time = callback;
        }
     
        PFN_xrGetInstanceProcAddr UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API script_intercept_xrCreateSession_xrGetInstanceProcAddr(PFN_xrGetInstanceProcAddr old)
        {
            s_xrGetInstanceProcAddr = old;
            return intercept_functions_xrGetInstanceProcAddr;
        }
     
        void UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API UnityPluginLoad(IUnityInterfaces* unityInterfaces)
        {
            unityLogPtr = unityInterfaces->Get<IUnityLog>();
        }
     
        void UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API UnityPluginUnload()
        {
            unityLogPtr = nullptr;
        }
    }

I am looking for suggestions as to what could be wrong.
Thanks!

If I’m reading it right, that code measures the interval between successive calls to xrWaitFrame and between successive calls to xrBeginFrame. Since XR rendering is vsynced, that ends up sampling roughly the same point relative to the frame start on every frame (as long as the application makes frame rate), so the difference is just the vsync interval. At a 90 Hz refresh rate that is about 11.1 ms, which matches the values you are seeing.

You would have to measure something like the time between xrBeginFrame and xrEndFrame to get the render-thread frame time, and sample both sides of the native xrWaitFrame call so that the time spent blocking inside it can be separated out.
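For illustration, here is a minimal sketch of what those hooks could look like, assuming you also intercept xrEndFrame through the same xrGetInstanceProcAddr hook and wire up hypothetical send_render_time / send_wait_time callbacks the same way as the existing ones:

    #include "openxr/openxr.h"
    #include <chrono>
     
    // Hypothetical callbacks, registered from C# the same way as the existing ones.
    typedef void (*SendTime_pfn)(double milliseconds);
    static SendTime_pfn send_render_time = nullptr;
    static SendTime_pfn send_wait_time = nullptr;
     
    static PFN_xrBeginFrame s_xrBeginFrame = nullptr;
    static PFN_xrEndFrame s_xrEndFrame = nullptr;
    static PFN_xrWaitFrame s_xrWaitFrame = nullptr;
     
    static std::chrono::high_resolution_clock::time_point begin_frame_time;
     
    static XrResult XRAPI_PTR intercepted_xrBeginFrame(XrSession session, const XrFrameBeginInfo* frameBeginInfo)
    {
        // Record when the frame's rendering work starts.
        begin_frame_time = std::chrono::high_resolution_clock::now();
        return s_xrBeginFrame(session, frameBeginInfo);
    }
     
    static XrResult XRAPI_PTR intercepted_xrEndFrame(XrSession session, const XrFrameEndInfo* frameEndInfo)
    {
        XrResult result = s_xrEndFrame(session, frameEndInfo);
        // xrBeginFrame -> xrEndFrame: roughly the render-thread frame time (CPU side).
        auto elapsed = std::chrono::high_resolution_clock::now() - begin_frame_time;
        if (send_render_time)
            send_render_time(std::chrono::duration<double, std::milli>(elapsed).count());
        return result;
    }
     
    static XrResult XRAPI_PTR intercepted_xrWaitFrame(XrSession session, const XrFrameWaitInfo* frameWaitInfo, XrFrameState* frameState)
    {
        // Sample both sides of the native call so only the blocking time is counted.
        auto before = std::chrono::high_resolution_clock::now();
        XrResult result = s_xrWaitFrame(session, frameWaitInfo, frameState);
        auto after = std::chrono::high_resolution_clock::now();
        if (send_wait_time)
            send_wait_time(std::chrono::duration<double, std::milli>(after - before).count());
        return result;
    }

This is only a sketch of the timing logic, not a drop-in replacement for your plugin; the GPU side would still need a proper GPU timestamp query, since none of these CPU timers measure actual GPU execution time.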

Note that some runtimes, such as SteamVR, block on xrBeginFrame instead of xrWaitFrame (contrary to what the spec recommends), so you might still get unexpected values when testing across different runtimes.


That makes sense.
Thanks a lot for your help!