obs-StreamFX/source/nvidia/cuda/nvidia-cuda-context.cpp
Michael Fabian 'Xaymar' Dirks 4d8ff417e7 nvidia-cuda: Improve usage of CUDA resources and functions
Load additional functions from CUDA and add new enumerations to support them:
* cuDevicePrimaryCtxSetFlags allows us to set the scheduling mode for the GPU.
* cuCtxGetStreamPriorityRange allows us to check which priority levels are supported.
* cuStreamCreateWithPriority allows us to create streams with non-default priority.

The scheduler mode is now set to yield so that other threads can do work whenever we end up waiting on a stalled CUDA call. Streams can also now be created with a higher priority and different flags if necessary. In most cases this should allow CUDA work to keep executing even while the GPU is under heavy load (a sketch of the new calls follows below).
2020-04-17 11:44:37 +02:00
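
Below is a minimal sketch of how these three calls fit together, written against the raw CUDA Driver API rather than this repository's wrapper class (assumptions: device 0 is used and error handling is omitted):

    #include <cuda.h>

    void example()
    {
        CUdevice  dev    = 0;
        CUcontext ctx    = nullptr;
        CUstream  stream = nullptr;

        cuInit(0);
        cuDeviceGet(&dev, 0);

        // Prefer yielding the CPU thread over spin-waiting while CUDA work is pending.
        cuDevicePrimaryCtxSetFlags(dev, CU_CTX_SCHED_YIELD);

        // Retain the primary context and make it current on this thread.
        cuDevicePrimaryCtxRetain(&ctx, dev);
        cuCtxPushCurrent(ctx);

        // Query the supported stream priority range; numerically smaller values mean
        // higher priority, so `greatest` is the highest-priority value.
        int least = 0, greatest = 0;
        cuCtxGetStreamPriorityRange(&least, &greatest);

        // Create a non-blocking stream at the highest supported priority.
        cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, greatest);

        // ... enqueue work on `stream` ...

        cuStreamDestroy(stream);
        cuCtxPopCurrent(nullptr);
        cuDevicePrimaryCtxRelease(dev);
    }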

/*
* Modern effects for a modern Streamer
* Copyright (C) 2020 Michael Fabian Dirks
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include "nvidia-cuda-context.hpp"
#include <stdexcept>
#ifdef WIN32
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4191 4365 4777 5039 5204)
#endif
#include <atlutil.h>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif

nvidia::cuda::context::context(std::shared_ptr<::nvidia::cuda::cuda> cuda)
	: _cuda(cuda), _ctx(), _has_device(false), _device()
{
	if (!cuda)
		throw std::invalid_argument("cuda");
}

nvidia::cuda::context::~context()
{
	if (_has_device) {
		// Primary contexts are reference counted and must be released, not destroyed.
		_cuda->cuDevicePrimaryCtxRelease(_device);
	} else {
		// Only destroy a context that this wrapper created itself.
		_cuda->cuCtxDestroy(_ctx);
	}
}

#ifdef WIN32
nvidia::cuda::context::context(std::shared_ptr<::nvidia::cuda::cuda> cuda, ID3D11Device* device) : context(cuda)
{
	using namespace nvidia::cuda;

	if (!device)
		throw std::invalid_argument("device");

	// Get DXGI Device
	IDXGIDevice* dxgi_device; // Don't use ATL::CComPtr
	device->QueryInterface(__uuidof(IDXGIDevice), (void**)&dxgi_device);

	// Get DXGI Adapter
	ATL::CComPtr<IDXGIAdapter> dxgi_adapter;
	dxgi_device->GetAdapter(&dxgi_adapter);

	// Get Device Index
	if (cu_result res = _cuda->cuD3D11GetDevice(&_device, dxgi_adapter); res != cu_result::SUCCESS) {
		throw std::runtime_error("Failed to get device index for device.");
	}

	// Acquire Context
	if (cu_result res = _cuda->cuDevicePrimaryCtxRetain(&_ctx, _device); res != cu_result::SUCCESS) {
		throw std::runtime_error("Failed to acquire primary device context.");
	}

	_cuda->cuDevicePrimaryCtxSetFlags(_device, cu_context_flags::SCHEDULER_YIELD);
	_has_device = true;
}
#endif

::nvidia::cuda::cu_context_t nvidia::cuda::context::get()
{
	return _ctx;
}