Starcommander* and I have been trying to get OpenXR-based virtual reality to work in JMonkey (credit where credit's due: Starcommander did most of the initial work getting it running; I've been tinkering around the edges and adding hand-related features like actions and hand skeletons). It mostly works, but with some odd lensing effects (objects in the corner of your eye move so they no longer seem to be where they are supposed to be). My suspicion is that it's because of the way the eyes are being rendered, but I don't really know.
The render loop looks like this:
/**
 * Locates the per-eye views for the predicted display time, renders each eye into
 * its own swapchain image, and fills {@code layer} ready for submission to
 * {@code xrEndFrame}.
 *
 * @param stack                per-frame LWJGL memory stack used for all transient Xr structs
 * @param predictedDisplayTime runtime-predicted display time the views are located for
 * @param layer                composition layer to populate with space + projection views
 * @return true if the layer was filled; false when the runtime reported no valid
 *         tracking pose for the views (caller should skip submitting this layer)
 */
private boolean renderLayerOpenXR(MemoryStack stack, long predictedDisplayTime, XrCompositionLayerProjection layer) {
XrViewState viewState = XrViewState.calloc(stack)
.type$Default();
IntBuffer pi = stack.mallocInt(1);
// Locate both eye views (pose + fov) in app space for the predicted display time.
check(xrLocateViews(
xrSession,
XrViewLocateInfo.malloc(stack)
.type$Default()
.next(NULL)
.viewConfigurationType(viewConfigType)
.displayTime(predictedDisplayTime)
.space(xrAppSpace),
viewState,
pi,
views
));
if ((viewState.viewStateFlags() & XR_VIEW_STATE_POSITION_VALID_BIT) == 0 ||
(viewState.viewStateFlags() & XR_VIEW_STATE_ORIENTATION_VALID_BIT) == 0) {
return false; // There are no valid tracking poses for the views.
}
int viewCountOutput = pi.get(0);
// One view, one view config and one swapchain per eye — these were all sized
// from the same xrEnumerateViewConfigurationViews call, so they must agree.
assert (viewCountOutput == views.capacity());
assert (viewCountOutput == viewConfigs.capacity());
assert (viewCountOutput == swapchains.length);
// Presumably XRHelper.fill stamps the given struct type onto every element of
// the buffer — TODO confirm against XRHelper.
XrCompositionLayerProjectionView.Buffer projectionLayerViews = XRHelper.fill(
XrCompositionLayerProjectionView.calloc(viewCountOutput, stack),
XrCompositionLayerProjectionView.TYPE,
XR_TYPE_COMPOSITION_LAYER_PROJECTION_VIEW
);
// Render view to the appropriate part of the swapchain image.
for (int viewIndex = 0; viewIndex < viewCountOutput; viewIndex++) {
// Each view has a separate swapchain which is acquired, rendered to, and released.
// The acquire → wait → render → release ordering is mandated by the OpenXR spec.
Swapchain viewSwapchain = swapchains[viewIndex];
check(xrAcquireSwapchainImage(
viewSwapchain.handle,
XrSwapchainImageAcquireInfo.calloc(stack)
.type$Default(),
pi
));
// pi is reused here: it now holds the index of the acquired swapchain image,
// not the view count read earlier.
int swapchainImageIndex = pi.get(0);
// Block until the compositor has finished reading the acquired image.
check(xrWaitSwapchainImage(
viewSwapchain.handle,
XrSwapchainImageWaitInfo.malloc(stack)
.type$Default()
.next(NULL)
.timeout(XR_INFINITE_DURATION)
));
// Copy the located pose/fov into the projection view and point its sub-image
// at the full extent of this eye's swapchain.
XrCompositionLayerProjectionView projectionLayerView = projectionLayerViews.get(viewIndex)
.pose(views.get(viewIndex).pose())
.fov(views.get(viewIndex).fov())
.subImage(si -> si
.swapchain(viewSwapchain.handle)
.imageRect(rect -> rect
.offset(offset -> offset
.x(0)
.y(0))
.extent(extent -> extent
.width(viewSwapchain.width)
.height(viewSwapchain.height)
)));
openGLRenderView(projectionLayerView, viewSwapchain.images.get(swapchainImageIndex), viewIndex);
check(xrReleaseSwapchainImage(
viewSwapchain.handle,
XrSwapchainImageReleaseInfo.calloc(stack)
.type$Default()
));
}
layer.space(xrAppSpace);
layer.views(projectionLayerViews);
checkForGLErrors();
return true;
}
// Scratch objects reused every frame to avoid per-frame allocation.
private static Vector3f viewPos = new Vector3f();
private static Quaternion viewRot = new Quaternion();

/**
 * Renders one eye into the supplied OpenGL swapchain image: binds the image (and its
 * matching depth texture) to the shared framebuffer, copies the OpenXR pose/fov onto
 * the corresponding {@link Eye}, and triggers the eye's render.
 *
 * @param layerView      projection view carrying this eye's pose, fov and sub-image rect
 * @param swapchainImage the acquired OpenGL swapchain image to render into
 * @param viewIndex      0 = left eye, 1 = right eye
 */
private void openGLRenderView(XrCompositionLayerProjectionView layerView, XrSwapchainImageOpenGLKHR swapchainImage, int viewIndex) {
    glBindFramebuffer(GL_FRAMEBUFFER, swapchainFramebuffer);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, swapchainImage.image(), 0);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTextures.get(swapchainImage), 0);

    XrRect2Di imageRect = layerView.subImage().imageRect();
    glViewport(
            imageRect.offset().x(),
            imageRect.offset().y(),
            imageRect.extent().width(),
            imageRect.extent().height()
    );
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
    glFrontFace(GL_CW);
    glCullFace(GL_BACK);
    glEnable(GL_DEPTH_TEST);

    XrPosef pose = layerView.pose();
    XrVector3f pos = pose.position$();
    XrQuaternionf orientation = pose.orientation();
    // Negating z flips between OpenXR's right-handed -Z-forward convention and the
    // convention the rest of this code expects.
    // NOTE(review): verify this handedness conversion against jME's coordinate
    // system — a half-done conversion would itself cause view misalignment.
    viewPos.set(pos.x(), pos.y(), -pos.z());
    viewRot.set(orientation.x(), orientation.y(), -orientation.z(), orientation.w());
    viewRot.inverseLocal();

    Eye eye = viewIndex == 0 ? xrHmd.getLeftEye() : xrHmd.getRightEye();
    eye.setRotation(viewRot);
    eye.setPosition(viewPos);

    // BUG FIX: the horizontal and vertical spans were previously swapped — the
    // left/right span was stored in a variable named "foyY" and ended up used as
    // the vertical field of view. In OpenXR, angleLeft and angleDown are negative,
    // so (right - left) is the full horizontal span and (up - down) the vertical.
    float fovX = layerView.fov().angleRight() - layerView.fov().angleLeft();
    float fovY = layerView.fov().angleUp() - layerView.fov().angleDown();
    // NOTE(review): collapsing the four per-eye angles into a symmetric frustum
    // discards OpenXR's asymmetric fov (angleLeft != -angleRight on most HMDs).
    // That lost asymmetry is a plausible cause of the reported "lensing"; ideally
    // pass all four angles through and build an asymmetric frustum instead.
    eye.setFieldOfView(fovX, fovY);
    eye.render();

    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    if (viewIndex == swapchains.length - 1) {
        // Flush once after the last eye so all GL work is submitted before xrEndFrame.
        glFlush();
    }
    checkForGLErrors();
}
With the supporting Eye class looking like this:
/**
 * Renders the scene from one eye's point of view into an offscreen texture, then
 * draws that texture onto a geometry so the result can be copied into the OpenXR
 * swapchain image currently bound to the GL framebuffer.
 */
public class Eye {
    // Monotonically increasing suffix so each eye's pre-view gets a unique name.
    static int index = 0;

    private final SimpleApplication app;
    private Texture2D offTex;
    private final Geometry offGeo;
    private Camera offCamera;
    // Cached symmetric fov (radians); -1 means "not yet set" so the first call
    // to setFieldOfView always applies.
    float fovX = -1;
    float fovY = -1;

    public Eye(SimpleApplication app)
    {
        this.app = app;
        setupOffscreenView(app);
        Material mat = new Material(app.getAssetManager(), "Common/MatDefs/Misc/Unshaded.j3md");
        mat.setTexture("ColorMap", offTex);
        // The eye camera's output texture is applied to this geometry, which is
        // rendered directly into whatever framebuffer is bound at render() time.
        offGeo = new Geometry("box", new Box(1, 1, 1));
        offGeo.setMaterial(mat);
    }

    public void setPosition(Vector3f newPosition){
        offCamera.setLocation(newPosition);
    }

    public void setRotation(Quaternion newRotation){
        offCamera.setRotation(newRotation);
    }

    /**
     * Sets a symmetric field of view for the eye. Angles in radians.
     *
     * <p>Note this cannot represent the asymmetric per-eye frustum OpenXR reports;
     * prefer {@link #setFieldOfView(float, float, float, float)} for HMD rendering.
     *
     * @param fovX full horizontal field of view, radians
     * @param fovY full vertical field of view, radians
     */
    public void setFieldOfView(float fovX, float fovY){
        if (this.fovX!= fovX || this.fovY!= fovY){
            this.fovX = fovX;
            this.fovY = fovY;
            // BUG FIX: the aspect ratio of a perspective frustum is the ratio of
            // the half-angle tangents, not of the angles themselves — fovX/fovY is
            // only an approximation that degrades at wide (VR-sized) fields of view.
            float aspect = FastMath.tan(fovX * 0.5f) / FastMath.tan(fovY * 0.5f);
            offCamera.setFrustumPerspective(FastMath.RAD_TO_DEG * fovY, aspect, 0.1f, 1000f);
        }
    }

    /**
     * Sets an asymmetric frustum from the four OpenXR per-eye half angles, as
     * reported in {@code XrFovf}. Angles in radians; left and down are negative.
     * This preserves the asymmetry that the symmetric overload discards.
     *
     * @param angleLeft  angle from the view axis to the left frustum plane (negative)
     * @param angleRight angle from the view axis to the right frustum plane
     * @param angleUp    angle from the view axis to the top frustum plane
     * @param angleDown  angle from the view axis to the bottom frustum plane (negative)
     */
    public void setFieldOfView(float angleLeft, float angleRight, float angleUp, float angleDown){
        float near = 0.1f;
        float far = 1000f;
        // Frustum plane offsets at the near plane are tan(angle) * near.
        offCamera.setFrustum(near, far,
                FastMath.tan(angleLeft) * near,
                FastMath.tan(angleRight) * near,
                FastMath.tan(angleUp) * near,
                FastMath.tan(angleDown) * near);
        offCamera.setParallelProjection(false);
        // Invalidate the symmetric cache so a later symmetric call re-applies.
        this.fovX = -1;
        this.fovY = -1;
    }

    /**
     * Creates the offscreen camera, pre-view and framebuffer this eye renders
     * the scene into; the framebuffer's color target backs {@link #offTex}.
     */
    private void setupOffscreenView(SimpleApplication app)
    {
        int w = app.getContext().getSettings().getWidth();
        int h = app.getContext().getSettings().getHeight();
        offCamera = new Camera(w, h);
        ViewPort offView = app.getRenderManager().createPreView("OffscreenViewX" + (index++), offCamera);
        offView.setClearFlags(true, true, true);
        offView.setBackgroundColor(ColorRGBA.DarkGray);
        FrameBuffer offBuffer = new FrameBuffer(w, h, 1);
        //setup framebuffer's texture
        offTex = new Texture2D(w, h, Format.RGBA8);
        offTex.setMinFilter(Texture.MinFilter.Trilinear);
        offTex.setMagFilter(Texture.MagFilter.Bilinear);
        //setup framebuffer to use texture
        offBuffer.setDepthTarget(FrameBufferTarget.newTarget(Format.Depth));
        offBuffer.addColorTarget(FrameBufferTarget.newTarget(offTex));
        //set viewport to render to offscreen framebuffer
        offView.setOutputFrameBuffer(offBuffer);
        offView.attachScene(app.getRootNode());
    }

    /**
     * Draws the textured geometry carrying this eye's rendered scene into the
     * currently bound framebuffer (the OpenXR swapchain image).
     */
    public void render()
    {
        app.getRenderManager().renderGeometry(offGeo);
    }
}
That approach of rendering a cube that has its texture set to the camera's output feels wrong, but I tried alternatives like
app.getRenderManager().renderViewPort(eye.getViewPort(), tpf)
and that didn’t work (left eye completely black, right eye the background colour of the camera).
Does anyone have any guidance on whether this is the right way to render the eye cameras? Or what might be causing the lensing? (I would provide a video but I don't think a video would necessarily show the wrongness — it's a subtle misalignment between where you know things should be and where they are actually rendered. Setting the FOV to 130° rather than what OpenXR suggests (110° ish) reduces the effect, but that feels like two wrongs making a right rather than the real solution.)
The full code is at jmonkeyengine/jme3-xr at openxr-withmove · richardTingle/jmonkeyengine · GitHub
*Not sure what Starcommander’s handle is on the JME forum?