Skip to content

Commit a6be16c

Browse files
authored
Merge pull request #19 from makepath/fixes-18-test-again-cuda-13
Adds support for OptiX 9.1 / CUDA 13.1
2 parents b1c982f + 7bd2070 commit a6be16c

36 files changed

+15691
-368
lines changed

.github/workflows/test.yml

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,9 @@ name: Test
22

33
on:
44
pull_request:
5-
branches:
6-
- master
5+
branches: [master]
76
push:
8-
branches:
9-
- master
7+
branches: [master]
108

119
jobs:
1210
test:
@@ -17,7 +15,7 @@ jobs:
1715
fail-fast: false
1816
matrix:
1917
os: [ubuntu-latest, windows-latest]
20-
python-version: ["3.7", "3.8", "3.9", "3.10"]
18+
python-version: ["3.11", "3.12", "3.13", "3.14"]
2119

2220
steps:
2321
- name: Checkout source
@@ -26,15 +24,29 @@ jobs:
2624
fetch-depth: 0
2725

2826
- name: Install Python ${{ matrix.python-version }}
29-
uses: actions/setup-python@v2
27+
uses: actions/setup-python@v6
3028
with:
3129
python-version: ${{ matrix.python-version }}
3230

31+
# Install CUDA toolkit (nvcc + CUDA_PATH)
32+
- name: Install CUDA Toolkit
33+
uses: Jimver/cuda-toolkit@v0.2.29
34+
with:
35+
cuda: "12.3.0"
36+
37+
- name: Verify CUDA
38+
shell: bash
39+
run: |
40+
echo "CUDA_PATH=$CUDA_PATH"
41+
nvcc --version
42+
3343
- name: Install rtxpy
3444
run: |
45+
python -m pip install -U pip
3546
python -m pip install -ve .[tests]
3647
python -m pip list
3748
3849
- name: Run tests
3950
run: |
4051
python -m pytest -v rtxpy/tests
52+

CMakeLists.txt

Lines changed: 38 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
cmake_minimum_required(VERSION 3.10)
2-
32
project(rtxpy)
43

54
set(CMAKE_CXX_STANDARD 11)
@@ -8,25 +7,53 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
87
add_definitions(-DRTX_EXPORTS)
98

109
if (WIN32)
11-
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
10+
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
1211
endif()
1312

14-
SET(SOURCE_DIR "crtx")
13+
set(SOURCE_DIR "crtx")
1514

1615
set(HEADERS
17-
${SOURCE_DIR}/common.h
18-
${SOURCE_DIR}/internal.h
19-
${SOURCE_DIR}/rtx.h
16+
${SOURCE_DIR}/common.h
17+
${SOURCE_DIR}/internal.h
18+
${SOURCE_DIR}/rtx.h
2019
)
2120

2221
set(SOURCES
23-
${SOURCE_DIR}/dllmain.cpp
24-
${SOURCE_DIR}/cuew/cuew.c
22+
${SOURCE_DIR}/dllmain.cpp
23+
${SOURCE_DIR}/cuew/cuew.c
2524
)
2625

2726
add_library(${PROJECT_NAME} SHARED ${HEADERS} ${SOURCES})
27+
target_compile_definitions(${PROJECT_NAME} PRIVATE CUDA_NO_PROTOTYPES OPTIX_DONT_INCLUDE_CUDA)
28+
29+
# ---- CUDA toolkit path (adjust if yours differs) ----
30+
set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
31+
set(CUDA_INCLUDE_DIR "${CUDA_TOOLKIT_ROOT_DIR}/include")
32+
set(CUDA_LIB_DIR "${CUDA_TOOLKIT_ROOT_DIR}/lib64")
33+
34+
target_include_directories(${PROJECT_NAME} PRIVATE
35+
${SOURCE_DIR}
36+
${SOURCE_DIR}/optix_9.1/include
37+
${SOURCE_DIR}/optix_9.1
38+
${SOURCE_DIR}/cuew
39+
${CUDA_INCLUDE_DIR}
40+
)
41+
42+
# Link search paths:
43+
# - CUDA toolkit libs (cudart, etc.) live here
44+
# - WSL provides the NVIDIA driver libcuda.so here
45+
target_link_directories(${PROJECT_NAME} PRIVATE
46+
${CUDA_LIB_DIR}
47+
/usr/lib/wsl/lib
48+
)
49+
50+
target_link_libraries(${PROJECT_NAME} PRIVATE
51+
cuda # libcuda.so (driver API)
52+
dl
53+
pthread
54+
)
2855

29-
target_include_directories(${PROJECT_NAME} PUBLIC
30-
${SOURCE_DIR}/optix_7.1
31-
${SOURCE_DIR}/cuew
56+
# Ensure runtime can find libcuda.so on WSL
57+
target_link_options(${PROJECT_NAME} PRIVATE
58+
"-Wl,-rpath,/usr/lib/wsl/lib"
3259
)

MANIFEST.in

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
11
# Extra files required in sdist
22
include CMakeLists.txt
3-
recursive-include crtx *.h *.c *cpp *.cu *.sh
3+
recursive-include crtx *.h *.c *cpp *.cu *.sh *.ptx
4+
recursive-include rtxpy *.ptx *.so

README.md

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,3 +32,69 @@ To run tests
3232

3333
pip install -ve .[tests]
3434
pytest -v rtxpy/tests
35+
36+
37+
## Building from source:
38+
39+
### Building kernel.ptx
40+
```bash
41+
cd crtx
42+
bash compileOptiX.sh
43+
cp kernel.ptx ../rtxpy
44+
```
45+
46+
### Building `librtxpy.so`
47+
```bash
48+
bash clean_build.sh
49+
cp build/librtxpy.so ./rtxpy
50+
```
51+
52+
### Building on WSL2:
53+
To get the build working on WSL, I followed the post below:
54+
https://forums.developer.nvidia.com/t/problem-running-optix-7-6-in-wsl/239355/8
55+
56+
---------------------
57+
58+
Welcome @chris.schwindt,
59+
60+
I believe we’re not yet packaging OptiX into the WSL2 driver. I believe this is hung up on a redesign of the driver packaging and delivery process, which is why it’s taking such a long time.
61+
62+
I have heard rumors that people have been able to get OptiX to work in WSL2 via manual install. This is unofficial and subject to change, so your mileage may vary, but here are some steps that may work for you:
63+
64+
Running OptiX Applications on WSL 2
65+
Install WSL 2 and enable CUDA
66+
Follow the canonical methods for installing WSL, display driver, and CUDA Toolkit within WSL
67+
68+
As mentioned in the docs, do not install a Linux Display driver in WSL, this will break the mapping of libcuda.
69+
There are CUDA Toolkit downloads specifically for WSL that will not attempt to install a driver, only the toolkit.
70+
You can also deselect the driver in a normal version of the toolkit.
71+
Obtain OptiX / RTCore libraries for Linux
72+
Download and extract libraries from the linux display driver.
73+
You can run the driver installer in WSL using ./[driver filename].run -x which will unpack the driver but not install it.
74+
Copy libnvoptix.so.XXX.00, libnvidia-rtcore.so.XXX.00, and libnvidia-ptxjitcompiler.so.XXX.00 into C:/Windows/System32/lxss/lib where XXX is the driver version.
75+
Rename libnvoptix.so.XXX.00 to libnvoptix.so.1
76+
Rename libnvidia-ptxjitcompiler.so.XXX.00 to libnvidia-ptxjitcompiler.so.1
77+
Do not rename libnvidia-rtcore.so.XXX.00
78+
Be aware that future drivers may need additional libraries that will need to be copied.
79+
Building an OptiX Application
80+
You may need to add /usr/local/cuda/bin to your PATH to access NVCC, but do NOT add /usr/local/cuda/lib64 to LD_LIBRARY_PATH as you normally would when installing the CUDA toolkit. libcuda and other libraries are passed through from C:/Windows/System32/lxss/lib where you placed the OptiX and RTCore libs.
81+
Instead, add /usr/lib/wsl/lib to your LD_LIBRARY_PATH to pick up CUDA, OptiX, etc.
82+
Running an OptiX Application
83+
With LD_LIBRARY_PATH set per the previous step, you should be able to run an OptiX executable.
84+
You may need to rebuild the WSL cache. You can do so by quitting any WSL sessions and running wsl --shutdown from Powershell, then starting a new WSL session. Failing to reset the cache may lead to strange load paths.
85+
You may verify paths are correct using strace, e.g., strace -o trace ./bin/optixHello
86+
87+
David.
88+
89+
---------------------
90+
91+
I ended up downloading: https://uk.download.nvidia.com/XFree86/Linux-x86_64/590.44.01/NVIDIA-Linux-x86_64-590.44.01.run
92+
NVIDIA Driver: 590.44.01
93+
94+
I then extracted the files and followed the instructions above.
95+
96+
97+
I then extracted the driver files with:
```bash
./NVIDIA-Linux-x86_64-590.44.01.run -x
```

clean_build.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
rm -rf build
2+
mkdir build
3+
cd build
4+
cmake ..
5+
cmake --build . -j

crtx/compileOptiX.sh

Lines changed: 45 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,53 +1,64 @@
11
#!/bin/bash
2+
set -euo pipefail
23

34
unameOut="$(uname -s)"
45
case "${unameOut}" in
56
Linux*) machine=Linux;;
67
Darwin*) machine=Mac;;
78
CYGWIN*) machine=Cygwin;;
89
MINGW*) machine=MinGw;;
9-
*) machine="UNKNOWN:${unameOut}"
10+
*) machine="UNKNOWN:${unameOut}";;
1011
esac
1112

12-
if [ ! -d "external/shaders" ]
13-
then
14-
mkdir external/shaders
15-
fi
13+
mkdir -p external/shaders
14+
15+
OPTIX_VERSION=9.1.0
1616

1717
if [ "${machine}" == "Linux" ]
1818
then
19-
echo "Setting up variables for Linux"
20-
export OPTIX_VERSION=7.1.0
21-
export INCLUDES="-I'/<PATH_TO>/NVIDIA-OptiX-SDK-${OPTIX_VERSION}-linux64-x86_64/include'"
22-
export INCLUDES="$INCLUDES -I'../include'"
23-
export INCLUDES="$INCLUDES -I'/usr/local/cuda/samples/common/inc'" #For math_helper.h
24-
export NVCC="/usr/local/cuda/bin/nvcc"
25-
export COMPILER="g++"
26-
else
27-
if [ "${machine}" == "MinGw" ]
28-
then
29-
echo "Setting up variables for Windows (Git Bash)"
30-
31-
export OPTIX_VERSION=7.1.0
32-
export CUDA_VERSION=11.4
33-
export INCLUDES=(-I"/c/ProgramData/NVIDIA Corporation/OptiX SDK $OPTIX_VERSION/include" -I"../include" -I"/c/ProgramData/NVIDIA Corporation/CUDA Samples/v${CUDA_VERSION}/common/inc")
34-
export NVCC="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc"
35-
# You may need to update the path to a valid compiler. This points to MSVS 2019 compiler
36-
export COMPILER="/c/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64"
37-
else
38-
echo "Unsupported OS : ${machine}"
39-
fi
40-
fi
19+
echo "Setting up variables for Linux"
4120

42-
echo "Compiling for OptiX $OPTIX_VERSION"
43-
echo "NVCC compiler currently set: $NVCC"
44-
echo "C++ compiler currently set: $COMPILER"
21+
NVCC="/usr/local/cuda/bin/nvcc"
22+
COMPILER="g++"
4523

46-
export NVCC_FLAGS="-m64 --std c++11 --use_fast_math -cudart static -arch sm_50 -Xptxas -v"
24+
INCLUDES=(
25+
-I"./optix_9.1" # <-- OptiX 9.1 headers vendored in this repo
26+
-I"../include"
27+
-I"/usr/local/cuda/samples/common/inc" # For helper_math.h / math_helper.h (CUDA samples)
28+
)
4729

48-
if [ -f "kernel.ptx" ]
30+
elif [ "${machine}" == "MinGw" ]
4931
then
50-
rm kernel.ptx
32+
echo "Setting up variables for Windows (Git Bash)"
33+
34+
CUDA_VERSION=11.4
35+
INCLUDES=(
36+
-I"./optix_7.1" # <-- also use vendored headers on Windows
37+
-I"../include"
38+
-I"/c/ProgramData/NVIDIA Corporation/CUDA Samples/v${CUDA_VERSION}/common/inc"
39+
)
40+
41+
NVCC="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc"
42+
COMPILER="/c/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64"
43+
else
44+
echo "Unsupported OS : ${machine}"
45+
exit 1
5146
fi
5247

53-
exec "$NVCC" $NVCC_FLAGS -ccbin "$COMPILER" "${INCLUDES[@]}" -ptx -o kernel.ptx kernel.cu >> cudaoutput.txt | tee
48+
echo "Compiling for OptiX ${OPTIX_VERSION}"
49+
echo "NVCC compiler currently set: ${NVCC}"
50+
echo "C++ compiler currently set: ${COMPILER}"
51+
52+
NVCC_FLAGS=(
53+
-m64
54+
--std=c++11
55+
--use_fast_math
56+
-cudart=static
57+
-arch=sm_86
58+
-Xptxas -v
59+
)
60+
61+
rm -f kernel.ptx
62+
63+
exec "${NVCC}" "${NVCC_FLAGS[@]}" -ccbin "${COMPILER}" "${INCLUDES[@]}" -ptx -o kernel.ptx kernel.cu \
64+
>> cudaoutput.txt | tee

crtx/cuew/cuew.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
# endif
2626
#endif
2727

28-
#include <cuew.h>
28+
#include <cuew/cuew.h>
2929
#include <assert.h>
3030
#include <stdio.h>
3131
#include <string.h>

0 commit comments

Comments
 (0)