Python setuptools.command.build_ext.build_ext.build_extension() Examples

The following are 9 code examples of setuptools.command.build_ext.build_ext.build_extension(), taken from open source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the module setuptools.command.build_ext.build_ext.
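
All nine examples share the same basic pattern: subclass the setuptools build_ext command, override build_extension() (and sometimes run()), and register the subclass through the cmdclass argument of setup(). The sketch below is a minimal illustration of that pattern; it is not taken from any of the projects listed here, and the package, module, and class names are placeholders.

# Minimal sketch of the shared pattern (placeholder names, not from any
# project below): override build_extension() so a failed compile is reported
# instead of aborting the install, and register the command via cmdclass.
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext


class OptionalBuildExt(build_ext):
    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except Exception as e:
            print('Skipping optional extension %s: %s' % (ext.name, e))


setup(
    name='example-package',  # placeholder metadata
    version='0.1.0',
    ext_modules=[Extension('example._speedups',  # placeholder extension
                           sources=['example/_speedups.c'])],
    cmdclass={'build_ext': OptionalBuildExt},
)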
Example #1
Source File: setup.py    From pymoo with Apache License 2.0
def construct_build_ext(build_ext):
    class WrappedBuildExt(build_ext):
        def run(self):
            try:
                build_ext.run(self)
            except BaseException as e:
                raise CompilingFailed(e)

        def build_extension(self, ext):
            try:
                build_ext.build_extension(self, ext)
            except BaseException as e:
                raise CompilingFailed(e)

    return WrappedBuildExt


# ============================================================
# SETUP
# ============================================================ 
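
A factory like construct_build_ext() above is typically used to attempt a compiled build first and fall back to a pure-Python install if compilation fails. The sketch below is only an assumption about how such a wrapper is wired into setup(), not a copy of pymoo's setup.py; CompilingFailed is defined here as a placeholder for the project's own exception.

# Hedged usage sketch (not copied from pymoo's setup.py): try the compiled
# build first, then retry without C extensions if it fails.
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext


class CompilingFailed(Exception):
    """Placeholder for the exception raised by WrappedBuildExt above."""


def run_setup(ext_modules):
    cmdclass = {'build_ext': construct_build_ext(build_ext)} if ext_modules else {}
    setup(name='mypkg',  # placeholder metadata
          version='0.1.0',
          ext_modules=ext_modules,
          cmdclass=cmdclass)


compiled = [Extension('mypkg._ext', sources=['mypkg/_ext.c'])]  # placeholder extension
try:
    run_setup(compiled)
except CompilingFailed:
    run_setup([])  # compilation failed, install the pure-Python version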
Example #2
Source File: setup.py    From training_results_v0.6 with Apache License 2.0
def build_tf_extension(build_ext, options):
    check_tf_version()
    tf_compile_flags, tf_link_flags = get_tf_flags(
        build_ext, options['COMPILE_FLAGS'])

    tensorflow_mpi_lib.define_macros = options['MACROS']
    tensorflow_mpi_lib.include_dirs = options['INCLUDES']
    tensorflow_mpi_lib.sources = options['SOURCES'] + \
        ['horovod/tensorflow/mpi_ops.cc']
    tensorflow_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
        tf_compile_flags
    tensorflow_mpi_lib.extra_link_args = options['LINK_FLAGS'] + tf_link_flags
    tensorflow_mpi_lib.library_dirs = options['LIBRARY_DIRS']
    tensorflow_mpi_lib.libraries = options['LIBRARIES']

    build_ext.build_extension(tensorflow_mpi_lib)

    # Return ABI flags used for TensorFlow compilation.  We will use this flag
    # to compile all the libraries.
    return [flag for flag in tf_compile_flags if '_GLIBCXX_USE_CXX11_ABI' in flag] 
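
The ABI flags are propagated because a prebuilt TensorFlow binary is compiled with a fixed value of _GLIBCXX_USE_CXX11_ABI, and mixing objects built with different values of that macro leads to undefined-symbol errors at link or import time. The helper get_tf_flags() is not shown above; as a rough, assumed sketch of where such flags usually come from, TensorFlow exposes them through tf.sysconfig:

# Rough sketch (assumed, not taken from this setup.py): TensorFlow reports
# the compile and link flags it was built with, including the ABI define.
import tensorflow as tf

tf_compile_flags = tf.sysconfig.get_compile_flags()  # includes -D_GLIBCXX_USE_CXX11_ABI=<0 or 1>
tf_link_flags = tf.sysconfig.get_link_flags()

# Keep only the ABI define so the rest of the build can reuse it.
abi_compile_flags = [flag for flag in tf_compile_flags if '_GLIBCXX_USE_CXX11_ABI' in flag]
print(abi_compile_flags)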
Example #3
Source File: setup.py    From knitlib with GNU Lesser General Public License v3.0
    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, CompileError, DistutilsExecError) as e:
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail). 
Example #4
Source File: setup.py    From training_results_v0.6 with Apache License 2.0
def build_common_extension(build_ext, options, abi_compile_flags):
    common_mpi_lib.define_macros = options['MACROS']
    common_mpi_lib.include_dirs = options['INCLUDES']
    common_mpi_lib.sources = options['SOURCES'] + ['horovod/common/common.cc',
                                                   'horovod/common/mpi_message.cc',
                                                   'horovod/common/operations.cc',
                                                   'horovod/common/timeline.cc']
    common_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
        abi_compile_flags
    common_mpi_lib.extra_link_args = options['LINK_FLAGS']
    common_mpi_lib.library_dirs = options['LIBRARY_DIRS']
    common_mpi_lib.libraries = options['LIBRARIES']

    build_ext.build_extension(common_mpi_lib) 
Example #5
Source File: setup.py    From uarray with BSD 3-Clause "New" or "Revised" License
    def build_extension(self, ext):
        cc = self.compiler
        if cc.compiler_type == "unix":
            ext.extra_compile_args.append("--std=c++11")
        if self.plat_name.startswith("macosx"):
            ext.extra_compile_args.append("-mmacosx-version-min=10.9")
            ext.extra_link_args.append("-mmacosx-version-min=10.9")
        build_ext.build_extension(self, ext) 
Example #6
Source File: setup.py    From aredis with MIT License
    def build_extension(self, ext):
        name = ext.name
        try:
            build_ext.build_extension(self, ext)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % ("The %s extension "
                                                  "module" % (name,),
                                                  "The output above "
                                                  "this warning shows how "
                                                  "the compilation "
                                                  "failed.")) 
Example #7
Source File: setup.py    From deap with GNU Lesser General Public License v3.0
    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors as e:
            print(e)
            raise BuildFailed() 
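
ext_errors and BuildFailed are defined elsewhere in deap's setup.py and are not shown above. A hedged sketch of that scaffolding (deap's actual definitions may differ) looks like this:

# Hedged sketch of the surrounding scaffolding (names follow the example,
# definitions are assumed rather than copied from deap's setup.py).
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError

# Exception types that indicate the compiler or toolchain failed.
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)


class BuildFailed(Exception):
    """Raised so the caller can catch it and retry setup() without C extensions."""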
Example #8
Source File: setup.py    From Fast_Sentence_Embeddings with GNU General Public License v3.0
    def build_extension(self, ext):
        name = ext.name
        try:
            build_ext.build_extension(self, ext)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(
                self.warning_message +
                'The %s extension module' % (name,) +
                'The output above this warning shows how the compilation failed.') 
Example #9
Source File: setup.py    From training_results_v0.6 with Apache License 2.0
def build_torch_extension(build_ext, options, abi_compile_flags):
    check_torch_import()

    have_cuda = is_torch_cuda()
    if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
        raise DistutilsPlatformError(
            'Horovod build with GPU support was requested, but this PyTorch '
            'installation does not support CUDA.')

    # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
    # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU
    # version or transfer tensors to CPU memory for those operations.
    updated_macros = set_macro(
        options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))

    # create_extension() overwrites these customized files, so we need to protect them.
    with protect_files('horovod/torch/mpi_lib/__init__.py',
                       'horovod/torch/mpi_lib_impl/__init__.py'):
        from torch.utils.ffi import create_extension
        ffi_iface = create_extension(
            name='horovod.torch.mpi_lib',
            headers=['horovod/torch/interface.h'] +
            (['horovod/torch/interface_cuda.h'] if have_cuda else []),
            with_cuda=have_cuda,
            language='c',
            package=True,
            sources=[],
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        ffi_impl = create_extension(
            name='horovod.torch.mpi_lib_impl',
            headers=[],
            with_cuda=have_cuda,
            language='c++',
            package=True,
            source_extension='.cc',
            define_macros=updated_macros,
            include_dirs=options['INCLUDES'],
            sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',
                                          'horovod/torch/handle_manager.cc',
                                          'horovod/torch/ready_event.cc',
                                          'horovod/torch/tensor_util.cc',
                                          'horovod/torch/cuda_util.cc',
                                          'horovod/torch/adapter.cc'],
            extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
            extra_link_args=options['LINK_FLAGS'],
            library_dirs=options['LIBRARY_DIRS'],
            libraries=options['LIBRARIES']
        )

    for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
                                (ffi_impl, torch_mpi_lib_impl)]:
        ffi_ext = ffi.distutils_extension()
        # ffi_ext is distutils Extension, not setuptools Extension
        for k, v in ffi_ext.__dict__.items():
            setuptools_ext.__dict__[k] = v
        build_ext.build_extension(setuptools_ext)

