CUDA decode, OpenGL display: CUDA/OpenGL interop, drawing to an OpenGL texture with CUDA


I am writing a rendering system in CUDA and want the results to be displayed quickly via OpenGL, without a round trip through main memory. I basically do the following:

Create and initialize an OpenGL texture, and register it with CUDA as a cudaGraphicsResource:

    GLuint viewGLTexture;
    cudaGraphicsResource_t viewCudaResource;

    void initialize() {
        glEnable(GL_TEXTURE_2D);
        glGenTextures(1, &viewGLTexture);
        glBindTexture(GL_TEXTURE_2D, viewGLTexture);
        {
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
        }
        glBindTexture(GL_TEXTURE_2D, 0);
        cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
    }

Whenever the view is resized, I resize the viewport and the texture image accordingly:

    void resize() {
        glViewport(0, 0, view.getWidth(), view.getHeight());
        glBindTexture(GL_TEXTURE_2D, viewGLTexture);
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
        }
        glBindTexture(GL_TEXTURE_2D, 0);
    }

Then, each frame, I map the graphics resource, get its cudaArray, wrap the array in a cudaSurfaceObject, call the rendering kernel on it, unmap and synchronize, and let OpenGL draw a fullscreen quad with this texture:

    void renderFrame() {
        cudaGraphicsMapResources(1, &viewCudaResource);
        {
            cudaArray_t viewCudaArray;
            cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0);
            cudaResourceDesc viewCudaArrayResourceDesc = {};
            {
                viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
                viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
            }
            cudaSurfaceObject_t viewCudaSurfaceObject;
            cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc);
            {
                invokeRenderingKernel(viewCudaSurfaceObject);
            }
            cudaDestroySurfaceObject(viewCudaSurfaceObject);
        }
        cudaGraphicsUnmapResources(1, &viewCudaResource);
        cudaStreamSynchronize(0);

        glBindTexture(GL_TEXTURE_2D, viewGLTexture);
        {
            glBegin(GL_QUADS);
            {
                glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
                glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
                glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
                glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
            }
            glEnd();
        }
        glBindTexture(GL_TEXTURE_2D, 0);

        glFinish();
    }
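
For completeness, invokeRenderingKernel is not shown in the question. A kernel that writes into the mapped texture through the surface object might look roughly like the sketch below; the kernel name, launch configuration, and the solid-color fill are illustrative assumptions, not the original rendering code.

    // Hypothetical sketch: fill the mapped texture through the surface object.
    // Assumes a GL_RGBA / GL_UNSIGNED_BYTE texture, i.e. one uchar4 per texel.
    __global__ void renderToSurface(cudaSurfaceObject_t surface, int width, int height) {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        if (x >= width || y >= height) return;
        uchar4 color = make_uchar4(255, 0, 0, 255);          // opaque red, as a placeholder
        surf2Dwrite(color, surface, x * sizeof(uchar4), y);  // x is a byte offset
    }

    void invokeRenderingKernel(cudaSurfaceObject_t surface) {
        dim3 block(16, 16);
        dim3 grid((view.getWidth()  + block.x - 1) / block.x,
                  (view.getHeight() + block.y - 1) / block.y);
        renderToSurface<<<grid, block>>>(surface, view.getWidth(), view.getHeight());
    }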

The problem is: whenever the view is resized, all CUDA calls start returning "unknown error", and visually the texture does not appear to be resized at all, just stretched across the whole view. Why is this happening, and how do I fix it?

Solution

It seems interop requires the texture to be re-registered after a resize: glTexImage2D reallocates the texture's storage, which invalidates the existing CUDA registration. Unregistering the resource before the reallocation and registering it again afterwards works:

    void resize() {
        glViewport(0, 0, view.getWidth(), view.getHeight());

        // unregister
        cudaGraphicsUnregisterResource(viewCudaResource);

        // resize
        glBindTexture(GL_TEXTURE_2D, viewGLTexture);
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
        }
        glBindTexture(GL_TEXTURE_2D, 0);

        // register back
        cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
    }
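
Since the symptom here was CUDA calls failing with "unknown error", it can also help to check the return code of every interop call so the first failing call is reported immediately. A minimal sketch, assuming a project-local helper (the checkCuda macro below is an illustration, not part of the original code):

    #include <cstdio>
    #include <cstdlib>
    #include <cuda_runtime.h>

    // Hypothetical helper: abort with a readable message on the first failing CUDA call.
    #define checkCuda(call)                                             \
        do {                                                            \
            cudaError_t err = (call);                                   \
            if (err != cudaSuccess) {                                   \
                fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                        cudaGetErrorString(err), __FILE__, __LINE__);   \
                exit(EXIT_FAILURE);                                     \
            }                                                           \
        } while (0)

    // Usage, e.g. in resize():
    //   checkCuda(cudaGraphicsUnregisterResource(viewCudaResource));
    //   checkCuda(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture,
    //                                         GL_TEXTURE_2D,
    //                                         cudaGraphicsRegisterFlagsWriteDiscard));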


