Python torch.utils.ffi.create_extension() Examples
The following are 5 code examples of torch.utils.ffi.create_extension().
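All of the examples below follow the same pattern: create_extension() is given a dotted extension module name, the C headers and sources to bind, and build options such as with_cuda, define_macros, and include_dirs, and the returned builder object's build() method compiles the extension. Here is a minimal sketch of that pattern; the module name and file paths are placeholders rather than files from any of the projects below, and note that torch.utils.ffi has since been removed from modern PyTorch releases (superseded by torch.utils.cpp_extension), so this API only runs on the old 0.x versions these projects target.

# Minimal sketch of the create_extension() call pattern; all names are placeholders.
from torch.utils.ffi import create_extension

ffi = create_extension(
    'my_ext._backend',             # dotted name of the generated extension module
    headers=['src/my_wrapper.h'],  # C headers declaring the functions to expose
    sources=['src/my_wrapper.c'],  # C sources compiled into the extension
    relative_to=__file__,          # resolve the paths above relative to this file
    with_cuda=False,               # set True (plus e.g. a WITH_CUDA macro) for CUDA builds
    verbose=False,
)
ffi.build()                        # compile the sources and emit the _backend module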
Example #1
Source File: build_ffi.py From Relation-Shape-CNN with MIT License
def build(args):
    extra_objects = args.objs
    extra_objects += [a for a in glob.glob('/usr/local/cuda/lib64/*.a')]
    ffi = create_extension(
        '_ext.pointnet2',
        headers=[a for a in glob.glob("cinclude/*_wrapper.h")],
        sources=[a for a in glob.glob("csrc/*.c")],
        define_macros=[('WITH_CUDA', None)],
        relative_to=__file__,
        with_cuda=True,
        extra_objects=extra_objects,
        include_dirs=[osp.join(base_dir, 'cinclude')],
        verbose=False,
        package=False
    )
    ffi.build()
Example #2
Source File: build_ffi.py From DensePoint with MIT License
def build(args):
    extra_objects = args.objs
    extra_objects += [a for a in glob.glob('/usr/local/cuda/lib64/*.a')]
    ffi = create_extension(
        '_ext.pointnet2',
        headers=[a for a in glob.glob("cinclude/*_wrapper.h")],
        sources=[a for a in glob.glob("csrc/*.c")],
        define_macros=[('WITH_CUDA', None)],
        relative_to=__file__,
        with_cuda=True,
        extra_objects=extra_objects,
        include_dirs=[osp.join(base_dir, 'cinclude')],
        verbose=False,
        package=False
    )
    ffi.build()
Example #3
Source File: build_ffi.py From sanet_relocal_demo with GNU General Public License v3.0
def build(args):
    extra_objects = args.objs
    extra_objects += [a for a in glob.glob('/usr/local/cuda/lib64/*.a')]
    ffi = create_extension(
        '_ext.pointnet2',
        headers=[a for a in glob.glob("cinclude/*_wrapper.h")],
        sources=[a for a in glob.glob("csrc/*.c")],
        define_macros=[('WITH_CUDA', None)],
        relative_to=__file__,
        with_cuda=True,
        extra_objects=extra_objects,
        include_dirs=[osp.join(base_dir, 'cinclude')],
        verbose=False,
        package=False
    )
    ffi.build()
Example #4
Source File: setup.py From training_results_v0.6 with Apache License 2.0
def is_torch_cuda():
    try:
        from torch.utils.ffi import create_extension
        cuda_test_ext = create_extension(
            name='horovod.torch.test_cuda',
            headers=['horovod/torch/dummy.h'],
            sources=[],
            with_cuda=True,
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        cuda_test_ext.build()
        return True
    except:
        print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
        return False
Example #5
Source File: setup.py From training_results_v0.6 with Apache License 2.0
def build_torch_extension(build_ext, options, abi_compile_flags):
    check_torch_import()

    have_cuda = is_torch_cuda()
    if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
        raise DistutilsPlatformError(
            'Horovod build with GPU support was requested, but this PyTorch '
            'installation does not support CUDA.')

    # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
    # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU
    # version or transfer tensors to CPU memory for those operations.
    updated_macros = set_macro(
        options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))

    # Create_extension overwrites these files which are customized, we need to protect them.
    with protect_files('horovod/torch/mpi_lib/__init__.py',
                       'horovod/torch/mpi_lib_impl/__init__.py'):
        from torch.utils.ffi import create_extension
        ffi_iface = create_extension(
            name='horovod.torch.mpi_lib',
            headers=['horovod/torch/interface.h'] +
                    (['horovod/torch/interface_cuda.h'] if have_cuda else []),
            with_cuda=have_cuda,
            language='c',
            package=True,
            sources=[],
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        ffi_impl = create_extension(
            name='horovod.torch.mpi_lib_impl',
            headers=[],
            with_cuda=have_cuda,
            language='c++',
            package=True,
            source_extension='.cc',
            define_macros=updated_macros,
            include_dirs=options['INCLUDES'],
            sources=options['SOURCES'] +
                    ['horovod/torch/mpi_ops.cc',
                     'horovod/torch/handle_manager.cc',
                     'horovod/torch/ready_event.cc',
                     'horovod/torch/tensor_util.cc',
                     'horovod/torch/cuda_util.cc',
                     'horovod/torch/adapter.cc'],
            extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
            extra_link_args=options['LINK_FLAGS'],
            library_dirs=options['LIBRARY_DIRS'],
            libraries=options['LIBRARIES']
        )

    for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
                                (ffi_impl, torch_mpi_lib_impl)]:
        ffi_ext = ffi.distutils_extension()
        # ffi_ext is distutils Extension, not setuptools Extension
        for k, v in ffi_ext.__dict__.items():
            setuptools_ext.__dict__[k] = v
        build_ext.build_extension(setuptools_ext)  # run the customize_compiler